python_code (string, lengths 0-679k) | repo_name (string, lengths 9-41) | file_path (string, lengths 6-149) |
---|---|---|
# Copyright 2020 NVIDIA Corporation
# SPDX-License-Identifier: Apache-2.0
print('static const size_t nodeSizes[3][6] = {')
for type in ['float', 'Vec3f', 'int']:
print('{', end='')
for ld in range(2, 8):
print(f'sizeof(InternalNode<LeafNode<{type}>, {ld}>)', end='')
if ld != 7:
print(', ', end='')
if(type == 'int'):
print('}')
else:
print('},')
print('};')
print('static const size_t leafSizes[3][6] = {')
for type in ['float', 'Vec3f', 'int']:
print('{', end='')
for ld in range(2, 8):
print(f'sizeof(LeafNode<{type}, Coord, Mask, {ld}>)', end='')
if ld != 7:
print(', ', end='')
if(type == 'int'):
print('}')
else:
print('},')
print('};')
print('static const Node2RangeFunc rangeFunctions[3][6] = {')
for type in ['float', 'Vec3f', 'int']:
print('{', end='')
for ld in range(2, 8):
print(f'GetNode2Range<{type}, {ld}>', end='')
if ld != 7:
print(', ', end='')
if(type == 'int'):
print('}')
else:
print('},')
print('};')
print('static const __device__ ProcessLeafFunc processLeafFuncs[3][6] = {')
for type in ['float', 'nanovdb::Vec3f', 'int']:
print('{', end='')
for ld in range(2, 8):
print(f'ProcessLeaf<{type}, {ld}>', end='')
if ld != 7:
print(', ', end='')
if(type == 'int'):
print('}')
else:
print('},')
print('};')
print('static const __device__ NodeRangeFunc rangeFunctions[2][3][6] = {{')
for type in ['float', 'nanovdb::Vec3f', 'int']:
print('{', end='')
for ld in range(2, 8):
print(f'GetNodeRange<LeafNodeSmpl<{type}, {ld}>>', end='')
if ld != 7:
print(', ', end='')
if(type == 'int'):
print('}')
else:
print('},')
print('},{',)
for type in ['float', 'nanovdb::Vec3f', 'int']:
print('{', end='')
for ld in range(2, 8):
# Using 3 for the leaf node's LOG2DIM suffices here
print(f'GetNodeRange<INodeSmpl<{type}, {ld}>>', end='')
if ld != 7:
print(', ', end='')
if(type == 'int'):
print('}')
else:
print('},')
print('}};')
print('static const __device__ ProcessInternalNodeFunc processInternalNodeFuncs[3][6] = {')
for type in ['float', 'nanovdb::Vec3f', 'int']:
print('{', end='')
for ld in range(2, 8):
print(f'ProcessInternalNode<{type}, {ld}>', end='')
if ld != 7:
print(', ', end='')
if(type == 'int'):
print('}')
else:
print('},')
print('};')
| gvdb-voxels-master | source/gNanoVDB/generateNodeTables.py |
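generateNodeTables.py above emits five C++ lookup tables by repeating the same pair of nested loops. Purely as an illustration of that pattern (the helper name, its parameters, and the single call shown are not part of the original file), the repetition could be factored like this:

# Hypothetical refactoring sketch of the table printer above, not original code.
def print_table(header, entry_fmt, types=('float', 'Vec3f', 'int'), footer='};'):
    print(header)
    for i, t in enumerate(types):
        entries = ', '.join(entry_fmt.format(type=t, ld=ld) for ld in range(2, 8))
        # the last row drops the trailing comma, matching the original output
        print('{%s}%s' % (entries, '' if i == len(types) - 1 else ','))
    print(footer)

print_table('static const size_t nodeSizes[3][6] = {',
            'sizeof(InternalNode<LeafNode<{type}>, {ld}>)')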
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
| swift-master | setup.py |
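setup.py above is the standard pbr shim: all package metadata lives in setup.cfg and is read by pbr at build time. As a hedged illustration of how a pbr-managed package can be introspected after installation (the package name 'swift' is assumed from the repo, not stated by the file):

# Illustrative only: query an installed pbr-managed package's version.
from pbr.version import VersionInfo

print(VersionInfo('swift').version_string())  # assumed package name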
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
from contextlib import contextmanager
import os
from six import reraise
from unittest.util import safe_repr
import warnings
warnings.filterwarnings('ignore', module='cryptography|OpenSSL', message=(
'Python 2 is no longer supported by the Python core team. '
'Support for it is now deprecated in cryptography, '
'and will be removed in a future release.'))
warnings.filterwarnings('ignore', module='cryptography|OpenSSL', message=(
'Python 2 is no longer supported by the Python core team. '
'Support for it is now deprecated in cryptography, '
'and will be removed in the next release.'))
warnings.filterwarnings('ignore', message=(
'Python 3.6 is no longer supported by the Python core team. '
'Therefore, support for it is deprecated in cryptography '
'and will be removed in a future release.'))
import unittest
if sys.version_info < (3, 2):
unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
unittest.TestCase.assertRegex = unittest.TestCase.assertRegexpMatches
from eventlet.green import socket
from swift.common.utils import readconf
# Work around what seems to be a Python bug.
# c.f. https://bugs.launchpad.net/swift/+bug/820185.
import logging
logging.raiseExceptions = False
def get_config(section_name=None, defaults=None):
"""
Attempt to get a test config dictionary.
:param section_name: the section to read (all sections if not defined)
:param defaults: an optional dictionary namespace of defaults
"""
config = {}
if defaults is not None:
config.update(defaults)
config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
'/etc/swift/test.conf')
try:
config = readconf(config_file, section_name)
except IOError:
if not os.path.exists(config_file):
print('Unable to read test config %s - file not found'
% config_file, file=sys.stderr)
elif not os.access(config_file, os.R_OK):
print('Unable to read test config %s - permission denied'
% config_file, file=sys.stderr)
except ValueError as e:
print(e)
return config
def listen_zero():
"""
eventlet.listen() always sets SO_REUSEPORT, so when called with
("localhost", 0) it can return the same port twice instead of unique
ports. That causes our tests to fail, so open-code it here without
SO_REUSEPORT.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", 0))
sock.listen(50)
return sock
@contextmanager
def annotate_failure(msg):
"""
Catch AssertionError and annotate it with a message. Useful when making
assertions in a loop where the message can indicate the loop index or
richer context about the failure.
:param msg: A message to be prefixed to the AssertionError message.
"""
try:
yield
except AssertionError as err:
err_typ, err_val, err_tb = sys.exc_info()
if err_val.args:
msg = '%s Failed with %s' % (msg, err_val.args[0])
err_val.args = (msg, ) + err_val.args[1:]
else:
# workaround for some IDE's raising custom AssertionErrors
err_val = '%s Failed with %s' % (msg, err)
err_typ = AssertionError
reraise(err_typ, err_val, err_tb)
class BaseTestCase(unittest.TestCase):
def _assertDictContainsSubset(self, subset, dictionary, msg=None):
"""Checks whether dictionary is a superset of subset."""
# This is almost identical to the method in python3.4 version of
# unittest.case.TestCase.assertDictContainsSubset, reproduced here to
# avoid the deprecation warning in the original when using python3.
missing = []
mismatched = []
for key, value in subset.items():
if key not in dictionary:
missing.append(key)
elif value != dictionary[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(dictionary[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
| swift-master | test/__init__.py |
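The module above (test/__init__.py) supplies shared helpers for Swift's test suite. A short usage sketch, with invented data, showing annotate_failure adding loop context to a failing assertion and listen_zero handing back an ephemeral-port socket:

# Illustrative only; the checked values are made up.
from test import annotate_failure, listen_zero

sock = listen_zero()
print('listening on port %d' % sock.getsockname()[1])
sock.close()

for index, count in enumerate([3, 3, 2]):
    # if the assertion fails, its message is prefixed with 'replica 2:'
    with annotate_failure('replica %d:' % index):
        assert count == 3, 'unexpected count %r' % count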
# Copyright (c) 2010-2021 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
import mock
import sys
from collections import defaultdict
from swift.common import utils
from swift.common.utils import NOTICE
class WARN_DEPRECATED(Exception):
def __init__(self, msg):
self.msg = msg
print(self.msg)
class FakeStatsdClient(utils.StatsdClient):
def __init__(self, host, port, base_prefix='', tail_prefix='',
default_sample_rate=1, sample_rate_factor=1, logger=None):
super(FakeStatsdClient, self).__init__(
host, port, base_prefix, tail_prefix, default_sample_rate,
sample_rate_factor, logger)
self.clear()
# Capture calls, then delegate to the parent's public stat functions
self.update_stats = self._capture("update_stats")
self.increment = self._capture("increment")
self.decrement = self._capture("decrement")
self.timing = self._capture("timing")
self.timing_since = self._capture("timing_since")
self.transfer_rate = self._capture("transfer_rate")
def _capture(self, func_name):
func = getattr(super(FakeStatsdClient, self), func_name)
def wrapper(*args, **kwargs):
self.calls[func_name].append((args, kwargs))
return func(*args, **kwargs)
return wrapper
def _determine_sock_family(self, host, port):
return None, None
def _open_socket(self):
return self
# sendto and close mimic the corresponding socket calls.
def sendto(self, msg, target):
self.sendto_calls.append((msg, target))
def close(self):
pass
def _send(self, *args, **kwargs):
self.send_calls.append((args, kwargs))
super(FakeStatsdClient, self)._send(*args, **kwargs)
def clear(self):
self.send_calls = []
self.calls = defaultdict(list)
self.sendto_calls = []
def get_increments(self):
return [call[0][0] for call in self.calls['increment']]
def get_increment_counts(self):
# note: this method reports the sum of stats sent via the increment
# method only; consider using get_stats_counts instead to get the sum
# of stats sent via both the increment and update_stats methods
counts = defaultdict(int)
for metric in self.get_increments():
counts[metric] += 1
return counts
def get_update_stats(self):
return [call[0][:2] for call in self.calls['update_stats']]
def get_stats_counts(self):
counts = defaultdict(int)
for metric, step in self.get_update_stats():
counts[metric] += step
return counts
class CaptureLog(object):
"""
Captures log records passed to the ``handle`` method and provides accessor
functions to the captured logs.
"""
def __init__(self):
self.clear()
def _clear(self):
self.log_dict = defaultdict(list)
self.lines_dict = {'critical': [], 'error': [], 'info': [],
'warning': [], 'debug': [], 'notice': []}
clear = _clear # this is a public interface
def get_lines_for_level(self, level):
if level not in self.lines_dict:
raise KeyError(
"Invalid log level '%s'; valid levels are %s" %
(level,
', '.join("'%s'" % lvl for lvl in sorted(self.lines_dict))))
return self.lines_dict[level]
def all_log_lines(self):
return dict((level, msgs) for level, msgs in self.lines_dict.items()
if len(msgs) > 0)
def _handle(self, record):
try:
line = record.getMessage()
except TypeError:
print('WARNING: unable to format log message %r %% %r' % (
record.msg, record.args))
raise
self.lines_dict[record.levelname.lower()].append(line)
return 0
def handle(self, record):
return self._handle(record)
class FakeLogger(logging.Logger, CaptureLog):
# a thread safe fake logger
def __init__(self, *args, **kwargs):
self._clear()
self.name = 'swift.unit.fake_logger'
self.level = logging.NOTSET
if 'facility' in kwargs:
self.facility = kwargs['facility']
self.statsd_client = FakeStatsdClient("host", 8125)
self.thread_locals = None
self.parent = None
# ensure the NOTICE level has been named, in case it has not already
# been set
logging.addLevelName(NOTICE, 'NOTICE')
store_in = {
logging.ERROR: 'error',
logging.WARNING: 'warning',
logging.INFO: 'info',
logging.DEBUG: 'debug',
logging.CRITICAL: 'critical',
NOTICE: 'notice',
}
def clear(self):
self._clear()
self.statsd_client.clear()
def close(self):
self.clear()
def warn(self, *args, **kwargs):
raise WARN_DEPRECATED("Deprecated Method warn use warning instead")
def notice(self, msg, *args, **kwargs):
"""
Convenience function for syslog priority LOG_NOTICE. The Python
logging level is set to 25, just above INFO. SysLogHandler is
monkey-patched to map this log level to the LOG_NOTICE syslog
priority.
"""
self.log(NOTICE, msg, *args, **kwargs)
def _log(self, level, msg, *args, **kwargs):
store_name = self.store_in[level]
cargs = [msg]
if any(args):
cargs.extend(args)
captured = dict(kwargs)
if 'exc_info' in kwargs and \
not isinstance(kwargs['exc_info'], tuple):
captured['exc_info'] = sys.exc_info()
self.log_dict[store_name].append((tuple(cargs), captured))
super(FakeLogger, self)._log(level, msg, *args, **kwargs)
def setFormatter(self, obj):
self.formatter = obj
def set_name(self, name):
# don't touch _handlers
self._name = name
def acquire(self):
pass
def release(self):
pass
def createLock(self):
pass
def emit(self, record):
pass
def flush(self):
pass
def handleError(self, record):
pass
def isEnabledFor(self, level):
return True
class DebugSwiftLogFormatter(utils.SwiftLogFormatter):
def format(self, record):
msg = super(DebugSwiftLogFormatter, self).format(record)
return msg.replace('#012', '\n')
class DebugLogger(FakeLogger):
"""A simple stdout logging version of FakeLogger"""
def __init__(self, *args, **kwargs):
FakeLogger.__init__(self, *args, **kwargs)
self.formatter = DebugSwiftLogFormatter(
"%(server)s %(levelname)s: %(message)s")
self.records = defaultdict(list)
def handle(self, record):
self._handle(record)
formatted = self.formatter.format(record)
print(formatted)
self.records[record.levelname].append(formatted)
class DebugLogAdapter(utils.LogAdapter):
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
return getattr(self.__dict__['logger'], name)
def debug_logger(name='test'):
"""get a named adapted debug logger"""
return DebugLogAdapter(DebugLogger(), name)
class ForwardingLogHandler(logging.NullHandler):
"""
Provides a LogHandler implementation that simply forwards filtered records
to a given handler function. This can be useful to forward records to a
handler without the handler itself needing to subclass LogHandler.
"""
def __init__(self, handler_fn):
super(ForwardingLogHandler, self).__init__()
self.handler_fn = handler_fn
def handle(self, record):
return self.handler_fn(record)
class CaptureLogAdapter(utils.LogAdapter, CaptureLog):
"""
A LogAdapter that is capable of capturing logs for inspection via accessor
methods.
"""
def __init__(self, logger, name):
super(CaptureLogAdapter, self).__init__(logger, name)
self.clear()
self.handler = ForwardingLogHandler(self.handle)
def start_capture(self):
"""
Attaches the adapter's handler to the adapted logger in order to start
capturing log messages.
"""
self.logger.addHandler(self.handler)
def stop_capture(self):
"""
Detaches the adapter's handler from the adapted logger. This should be
called to prevent further logging to the adapted logger (possibly via
other log adapter instances) being captured by this instance.
"""
self.logger.removeHandler(self.handler)
@contextlib.contextmanager
def capture_logger(conf, *args, **kwargs):
"""
Yields an adapted system logger based on the conf options. The log adapter
captures logs in order to support the pattern of tests calling the log
accessor methods (e.g. get_lines_for_level) directly on the logger
instance.
"""
with mock.patch('swift.common.utils.LogAdapter', CaptureLogAdapter):
log_adapter = utils.get_logger(conf, *args, **kwargs)
log_adapter.start_capture()
try:
yield log_adapter
finally:
log_adapter.stop_capture()
| swift-master | test/debug_logger.py |
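debug_logger() above is the usual entry point in unit tests: it returns a LogAdapter wrapping a DebugLogger, which prints and captures every record and whose FakeStatsdClient records metric calls. A hedged usage sketch (the log message and metric name are invented):

# Illustrative only: capture log lines and statsd counters with the fakes above.
from test.debug_logger import debug_logger

logger = debug_logger('my-test')
logger.warning('disk %s is full', 'sdb1')
assert logger.get_lines_for_level('warning') == ['disk sdb1 is full']

# the wrapped DebugLogger carries a FakeStatsdClient for metric assertions
statsd = logger.logger.statsd_client
statsd.increment('errors')
assert statsd.get_increment_counts() == {'errors': 1}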
# Copyright (c) 2022 Nvidia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import SkipTest
from test.s3api import BaseS3TestCase
class AlwaysAbsoluteURLProxyConfig(object):
def __init__(self):
self.settings = {'proxy_use_forwarding_for_https': True}
def proxy_url_for(self, request_url):
return request_url
def proxy_headers_for(self, proxy_url):
return {}
class TestRequestTargetStyle(BaseS3TestCase):
def setUp(self):
self.client = self.get_s3_client(1)
if not self.client._endpoint.host.startswith('https:'):
raise SkipTest('Absolute URL test requires https')
self.bucket_name = self.create_name('test-address-style')
resp = self.client.create_bucket(Bucket=self.bucket_name)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
def tearDown(self):
self.clear_bucket(self.client, self.bucket_name)
super(TestRequestTargetStyle, self).tearDown()
def test_absolute_url(self):
sess = self.client._endpoint.http_session
sess._proxy_config = AlwaysAbsoluteURLProxyConfig()
self.assertEqual({'use_forwarding_for_https': True},
sess._proxies_kwargs())
resp = self.client.list_buckets()
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.assertIn(self.bucket_name, {
info['Name'] for info in resp['Buckets']})
| swift-master | test/s3api/test_request_target_style.py |
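For context on what the test above checks: by default botocore sends origin-form request targets, while a proxy configuration that reports use_forwarding_for_https makes it put the absolute URL on the request line. A rough sketch of the two forms, with an invented endpoint and bucket:

# Request-line shapes only; host and bucket names are invented for illustration.
ORIGIN_FORM = 'GET /test-bucket HTTP/1.1'
ABSOLUTE_FORM = 'GET https://saio.example.com/test-bucket HTTP/1.1'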
# Copyright (c) 2019 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test.s3api import BaseS3TestCase, ConfigError
class TestGetServiceSigV4(BaseS3TestCase):
def test_empty_service(self):
def do_test(client):
access_key = client._request_signer._credentials.access_key
resp = client.list_buckets()
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.assertEqual([], resp['Buckets'])
self.assertIn('x-amz-request-id',
resp['ResponseMetadata']['HTTPHeaders'])
self.assertIn('DisplayName', resp['Owner'])
self.assertEqual(access_key, resp['Owner']['DisplayName'])
self.assertIn('ID', resp['Owner'])
client = self.get_s3_client(1)
do_test(client)
try:
client = self.get_s3_client(3)
except ConfigError:
pass
else:
do_test(client)
def test_service_with_buckets(self):
c = self.get_s3_client(1)
buckets = [self.create_name('bucket%s' % i) for i in range(5)]
for bucket in buckets:
c.create_bucket(Bucket=bucket)
resp = c.list_buckets()
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.assertEqual(sorted(buckets), [
bucket['Name'] for bucket in resp['Buckets']])
self.assertTrue(all('CreationDate' in bucket
for bucket in resp['Buckets']))
self.assertIn('x-amz-request-id',
resp['ResponseMetadata']['HTTPHeaders'])
self.assertIn('DisplayName', resp['Owner'])
access_key = c._request_signer._credentials.access_key
self.assertEqual(access_key, resp['Owner']['DisplayName'])
self.assertIn('ID', resp['Owner'])
# Second user can only see its own buckets
try:
c2 = self.get_s3_client(2)
except ConfigError as err:
raise unittest.SkipTest(str(err))
buckets2 = [self.create_name('bucket%s' % i) for i in range(2)]
for bucket in buckets2:
c2.create_bucket(Bucket=bucket)
self.assertEqual(sorted(buckets2), [
bucket['Name'] for bucket in c2.list_buckets()['Buckets']])
# Unprivileged user can't see anything
try:
c3 = self.get_s3_client(3)
except ConfigError as err:
raise unittest.SkipTest(str(err))
self.assertEqual([], c3.list_buckets()['Buckets'])
class TestGetServiceSigV2(TestGetServiceSigV4):
signature_version = 's3'
class TestGetServicePresignedV2(TestGetServiceSigV4):
signature_version = 's3-query'
class TestGetServicePresignedV4(TestGetServiceSigV4):
signature_version = 's3v4-query'
| swift-master | test/s3api/test_service.py |
# Copyright (c) 2019 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import unittest
import uuid
import time
import boto3
from botocore.exceptions import ClientError
from six.moves import urllib
from swift.common.utils import config_true_value, readconf
from test import get_config
_CONFIG = None
# boto's logging can get pretty noisy; require opt-in to see it all
if not config_true_value(os.environ.get('BOTO3_DEBUG')):
logging.getLogger('boto3').setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.INFO)
class ConfigError(Exception):
'''Error raised for test conf misconfigurations'''
def load_aws_config(conf_file):
"""
Read user credentials from an AWS CLI style credentials file and translate
to a swift test config. Currently only supports a single user.
:param conf_file: path to AWS credentials file
"""
conf = readconf(conf_file, 'default')
global _CONFIG
_CONFIG = {
'endpoint': 'https://s3.amazonaws.com',
'region': 'us-east-1',
'access_key1': conf.get('aws_access_key_id'),
'secret_key1': conf.get('aws_secret_access_key'),
'session_token1': conf.get('aws_session_token')
}
aws_config_file = os.environ.get('SWIFT_TEST_AWS_CONFIG_FILE')
if aws_config_file:
load_aws_config(aws_config_file)
print('Loaded test config from %s' % aws_config_file)
def get_opt_or_error(option):
global _CONFIG
if _CONFIG is None:
_CONFIG = get_config('s3api_test')
value = _CONFIG.get(option)
if not value:
raise ConfigError('must supply [s3api_test]%s' % option)
return value
def get_opt(option, default=None):
try:
return get_opt_or_error(option)
except ConfigError:
return default
def get_s3_client(user=1, signature_version='s3v4', addressing_style='path'):
'''
Get a boto3 client to talk to an S3 endpoint.
:param user: user number to use. Should be one of:
1 -- primary user
2 -- secondary user
3 -- unprivileged user
:param signature_version: S3 signing method. Should be one of:
s3 -- v2 signatures; produces Authorization headers like
``AWS access_key:signature``
s3-query -- v2 pre-signed URLs; produces query strings like
``?AWSAccessKeyId=access_key&Signature=signature``
s3v4 -- v4 signatures; produces Authorization headers like
``AWS4-HMAC-SHA256
Credential=access_key/date/region/s3/aws4_request,
Signature=signature``
s3v4-query -- v4 pre-signed URLs; produces query strings like
``?X-Amz-Algorithm=AWS4-HMAC-SHA256&
X-Amz-Credential=access_key/date/region/s3/aws4_request&
X-Amz-Signature=signature``
:param addressing_style: One of:
path -- produces URLs like ``http(s)://host.domain/bucket/key``
virtual -- produces URLs like ``http(s)://bucket.host.domain/key``
'''
endpoint = get_opt('endpoint', None)
if endpoint:
scheme = urllib.parse.urlsplit(endpoint).scheme
if scheme not in ('http', 'https'):
raise ConfigError('unexpected scheme in endpoint: %r; '
'expected http or https' % scheme)
else:
scheme = None
region = get_opt('region', 'us-east-1')
access_key = get_opt_or_error('access_key%d' % user)
secret_key = get_opt_or_error('secret_key%d' % user)
session_token = get_opt('session_token%d' % user)
ca_cert = get_opt('ca_cert')
if ca_cert is not None:
try:
# do a quick check now; it's more expensive to have boto check
os.stat(ca_cert)
except OSError as e:
raise ConfigError(str(e))
return boto3.client(
's3',
endpoint_url=endpoint,
region_name=region,
use_ssl=(scheme == 'https'),
verify=ca_cert,
config=boto3.session.Config(s3={
'signature_version': signature_version,
'addressing_style': addressing_style,
}),
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token
)
TEST_PREFIX = 's3api-test-'
class BaseS3TestCase(unittest.TestCase):
# Default to v4 signatures (as aws-cli does), but subclasses can override
signature_version = 's3v4'
@classmethod
def get_s3_client(cls, user):
return get_s3_client(user, cls.signature_version)
@classmethod
def _remove_all_object_versions_from_bucket(cls, client, bucket_name):
resp = client.list_object_versions(Bucket=bucket_name)
objs_to_delete = (resp.get('Versions', []) +
resp.get('DeleteMarkers', []))
while objs_to_delete:
multi_delete_body = {
'Objects': [
{'Key': obj['Key'], 'VersionId': obj['VersionId']}
for obj in objs_to_delete
],
'Quiet': False,
}
del_resp = client.delete_objects(Bucket=bucket_name,
Delete=multi_delete_body)
if any(del_resp.get('Errors', [])):
raise Exception('Unable to delete %r' % del_resp['Errors'])
if not resp['IsTruncated']:
break
key_marker = resp['NextKeyMarker']
version_id_marker = resp['NextVersionIdMarker']
resp = client.list_object_versions(
Bucket=bucket_name, KeyMarker=key_marker,
VersionIdMarker=version_id_marker)
objs_to_delete = (resp.get('Versions', []) +
resp.get('DeleteMarkers', []))
@classmethod
def clear_bucket(cls, client, bucket_name):
timeout = time.time() + 10
backoff = 0.1
cls._remove_all_object_versions_from_bucket(client, bucket_name)
try:
client.delete_bucket(Bucket=bucket_name)
except ClientError as e:
if 'NoSuchBucket' in str(e):
return
if 'BucketNotEmpty' not in str(e):
raise
# Something's gone sideways. Try harder
client.put_bucket_versioning(
Bucket=bucket_name,
VersioningConfiguration={'Status': 'Suspended'})
while True:
cls._remove_all_object_versions_from_bucket(
client, bucket_name)
# also try some version-unaware operations...
for key in client.list_objects(Bucket=bucket_name).get(
'Contents', []):
client.delete_object(Bucket=bucket_name, Key=key['Key'])
# *then* try again
try:
client.delete_bucket(Bucket=bucket_name)
except ClientError as e:
if 'NoSuchBucket' in str(e):
return
if 'BucketNotEmpty' not in str(e):
raise
if time.time() > timeout:
raise Exception('Timeout clearing %r' % bucket_name)
time.sleep(backoff)
backoff *= 2
else:
break
def create_name(self, slug):
return '%s%s-%s' % (TEST_PREFIX, slug, uuid.uuid4().hex)
@classmethod
def clear_account(cls, client):
for bucket in client.list_buckets()['Buckets']:
if not bucket['Name'].startswith(TEST_PREFIX):
# these tests run against real s3 accounts
continue
cls.clear_bucket(client, bucket['Name'])
def tearDown(self):
client = self.get_s3_client(1)
self.clear_account(client)
try:
client = self.get_s3_client(2)
except ConfigError:
pass
else:
self.clear_account(client)
| swift-master | test/s3api/__init__.py |
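The helpers in test/s3api/__init__.py above read their settings from a [s3api_test] section of the Swift test config (or from an AWS credentials file named by SWIFT_TEST_AWS_CONFIG_FILE). A sketch of the options the code looks up, expressed as the dict get_config() would return; every value is a placeholder:

# Placeholder values only; the option names mirror the get_opt()/get_opt_or_error()
# lookups above (endpoint, region, ca_cert, and per-user credentials 1-3).
S3API_TEST_CONF = {
    'endpoint': 'https://saio.example.com:8080',
    'region': 'us-east-1',
    'access_key1': 'test:tester',     # primary user
    'secret_key1': 'testing',
    'access_key2': 'test2:tester2',   # secondary user
    'secret_key2': 'testing2',
    'access_key3': 'test:tester3',    # unprivileged user
    'secret_key3': 'testing3',
    # 'session_token1': ...           # optional, as are ca_cert and tokens 2-3
}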
# Copyright (c) 2019 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from collections import defaultdict
from botocore.exceptions import ClientError
import six
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import md5
from test.s3api import BaseS3TestCase
def retry(f, timeout=10):
timelimit = time.time() + timeout
while True:
try:
f()
except (ClientError, AssertionError):
if time.time() > timelimit:
raise
continue
else:
break
class TestObjectVersioning(BaseS3TestCase):
maxDiff = None
def setUp(self):
self.client = self.get_s3_client(1)
self.bucket_name = self.create_name('versioning')
resp = self.client.create_bucket(Bucket=self.bucket_name)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
def enable_versioning():
resp = self.client.put_bucket_versioning(
Bucket=self.bucket_name,
VersioningConfiguration={'Status': 'Enabled'})
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
retry(enable_versioning)
def tearDown(self):
resp = self.client.put_bucket_versioning(
Bucket=self.bucket_name,
VersioningConfiguration={'Status': 'Suspended'})
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.clear_bucket(self.client, self.bucket_name)
super(TestObjectVersioning, self).tearDown()
def test_setup(self):
bucket_name = self.create_name('new-bucket')
resp = self.client.create_bucket(Bucket=bucket_name)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
expected_location = '/%s' % bucket_name
self.assertEqual(expected_location, resp['Location'])
headers = HeaderKeyDict(resp['ResponseMetadata']['HTTPHeaders'])
self.assertEqual('0', headers['content-length'])
self.assertEqual(expected_location, headers['location'])
# get versioning
resp = self.client.get_bucket_versioning(Bucket=bucket_name)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.assertNotIn('Status', resp)
# put versioning
versioning_config = {
'Status': 'Enabled',
}
resp = self.client.put_bucket_versioning(
Bucket=bucket_name,
VersioningConfiguration=versioning_config)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
# ... now it's enabled
def check_status():
resp = self.client.get_bucket_versioning(Bucket=bucket_name)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
try:
self.assertEqual('Enabled', resp['Status'])
except KeyError:
self.fail('Status was not in %r' % resp)
retry(check_status)
# send over some bogus junk
versioning_config['Status'] = 'Disabled'
with self.assertRaises(ClientError) as ctx:
self.client.put_bucket_versioning(
Bucket=bucket_name,
VersioningConfiguration=versioning_config)
expected_err = 'An error occurred (MalformedXML) when calling the ' \
'PutBucketVersioning operation: The XML you provided was ' \
'not well-formed or did not validate against our published schema'
self.assertEqual(expected_err, str(ctx.exception))
# disable it
versioning_config['Status'] = 'Suspended'
resp = self.client.put_bucket_versioning(
Bucket=bucket_name,
VersioningConfiguration=versioning_config)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
# ... now it's disabled again
def check_status():
resp = self.client.get_bucket_versioning(Bucket=bucket_name)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.assertEqual('Suspended', resp['Status'])
retry(check_status)
def test_upload_fileobj_versioned(self):
obj_data = self.create_name('some-data').encode('ascii')
obj_etag = md5(obj_data, usedforsecurity=False).hexdigest()
obj_name = self.create_name('versioned-obj')
self.client.upload_fileobj(six.BytesIO(obj_data),
self.bucket_name, obj_name)
# object is in the listing
resp = self.client.list_objects_v2(Bucket=self.bucket_name)
objs = resp.get('Contents', [])
for obj in objs:
obj.pop('LastModified')
self.assertEqual([{
'ETag': '"%s"' % obj_etag,
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}], objs)
# object version listing
resp = self.client.list_object_versions(Bucket=self.bucket_name)
objs = resp.get('Versions', [])
for obj in objs:
obj.pop('LastModified')
obj.pop('Owner')
obj.pop('VersionId')
self.assertEqual([{
'ETag': '"%s"' % obj_etag,
'IsLatest': True,
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}], objs)
# overwrite the object
new_obj_data = self.create_name('some-new-data').encode('ascii')
new_obj_etag = md5(new_obj_data, usedforsecurity=False).hexdigest()
self.client.upload_fileobj(six.BytesIO(new_obj_data),
self.bucket_name, obj_name)
# new object is in the listing
resp = self.client.list_objects_v2(Bucket=self.bucket_name)
objs = resp.get('Contents', [])
for obj in objs:
obj.pop('LastModified')
self.assertEqual([{
'ETag': '"%s"' % new_obj_etag,
'Key': obj_name,
'Size': len(new_obj_data),
'StorageClass': 'STANDARD',
}], objs)
# both object versions in the versions listing
resp = self.client.list_object_versions(Bucket=self.bucket_name)
objs = resp.get('Versions', [])
for obj in objs:
obj.pop('LastModified')
obj.pop('Owner')
obj.pop('VersionId')
self.assertEqual([{
'ETag': '"%s"' % new_obj_etag,
'IsLatest': True,
'Key': obj_name,
'Size': len(new_obj_data),
'StorageClass': 'STANDARD',
}, {
'ETag': '"%s"' % obj_etag,
'IsLatest': False,
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}], objs)
def test_delete_versioned_objects(self):
etags = []
obj_name = self.create_name('versioned-obj')
for i in range(3):
obj_data = self.create_name('some-data-%s' % i).encode('ascii')
etags.insert(0, md5(obj_data, usedforsecurity=False).hexdigest())
self.client.upload_fileobj(six.BytesIO(obj_data),
self.bucket_name, obj_name)
# only one object appears in the listing
resp = self.client.list_objects_v2(Bucket=self.bucket_name)
objs = resp.get('Contents', [])
for obj in objs:
obj.pop('LastModified')
self.assertEqual([{
'ETag': '"%s"' % etags[0],
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}], objs)
# but everything is laid out in the object versions listing
resp = self.client.list_object_versions(Bucket=self.bucket_name)
objs = resp.get('Versions', [])
versions = []
for obj in objs:
obj.pop('LastModified')
obj.pop('Owner')
versions.append(obj.pop('VersionId'))
self.assertEqual([{
'ETag': '"%s"' % etags[0],
'IsLatest': True,
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}, {
'ETag': '"%s"' % etags[1],
'IsLatest': False,
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}, {
'ETag': '"%s"' % etags[2],
'IsLatest': False,
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}], objs)
# we can delete a specific version
resp = self.client.delete_object(Bucket=self.bucket_name,
Key=obj_name,
VersionId=versions[1])
# and that just pulls it out of the versions listing
resp = self.client.list_object_versions(Bucket=self.bucket_name)
objs = resp.get('Versions', [])
for obj in objs:
obj.pop('LastModified')
obj.pop('Owner')
obj.pop('VersionId')
self.assertEqual([{
'ETag': '"%s"' % etags[0],
'IsLatest': True,
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}, {
'ETag': '"%s"' % etags[2],
'IsLatest': False,
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}], objs)
# ... but the current listing is unaffected
resp = self.client.list_objects_v2(Bucket=self.bucket_name)
objs = resp.get('Contents', [])
for obj in objs:
obj.pop('LastModified')
self.assertEqual([{
'ETag': '"%s"' % etags[0],
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}], objs)
# OTOH, if you delete specifically the latest version
resp = self.client.delete_object(Bucket=self.bucket_name,
Key=obj_name,
VersionId=versions[0])
# the versions listing has a new IsLatest
resp = self.client.list_object_versions(Bucket=self.bucket_name)
objs = resp.get('Versions', [])
for obj in objs:
obj.pop('LastModified')
obj.pop('Owner')
obj.pop('VersionId')
self.assertEqual([{
'ETag': '"%s"' % etags[2],
'IsLatest': True,
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}], objs)
# and the stack pops
resp = self.client.list_objects_v2(Bucket=self.bucket_name)
objs = resp.get('Contents', [])
for obj in objs:
obj.pop('LastModified')
self.assertEqual([{
'ETag': '"%s"' % etags[2],
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}], objs)
def test_delete_versioned_deletes(self):
etags = []
obj_name = self.create_name('versioned-obj')
for i in range(3):
obj_data = self.create_name('some-data-%s' % i).encode('ascii')
etags.insert(0, md5(obj_data, usedforsecurity=False).hexdigest())
self.client.upload_fileobj(six.BytesIO(obj_data),
self.bucket_name, obj_name)
# and make a delete marker
self.client.delete_object(Bucket=self.bucket_name, Key=obj_name)
# current listing is empty
resp = self.client.list_objects_v2(Bucket=self.bucket_name)
objs = resp.get('Contents', [])
self.assertEqual([], objs)
# but everything is laid out in the versions listing
resp = self.client.list_object_versions(Bucket=self.bucket_name)
objs = resp.get('Versions', [])
versions = []
for obj in objs:
obj.pop('LastModified')
obj.pop('Owner')
versions.append(obj.pop('VersionId'))
self.assertEqual([{
'ETag': '"%s"' % etag,
'IsLatest': False,
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
} for etag in etags], objs)
# ... plus the delete markers
delete_markers = resp.get('DeleteMarkers', [])
marker_versions = []
for marker in delete_markers:
marker.pop('LastModified')
marker.pop('Owner')
marker_versions.append(marker.pop('VersionId'))
self.assertEqual([{
'Key': obj_name,
'IsLatest': is_latest,
} for is_latest in (True, False, False)], delete_markers)
# delete an old delete marker
resp = self.client.delete_object(Bucket=self.bucket_name,
Key=obj_name,
VersionId=marker_versions[2])
# since the IsLatest entry is still a delete marker we'll raise NoSuchKey
with self.assertRaises(ClientError) as caught:
resp = self.client.get_object(Bucket=self.bucket_name,
Key=obj_name)
expected_err = 'An error occurred (NoSuchKey) when calling the ' \
'GetObject operation: The specified key does not exist.'
self.assertEqual(expected_err, str(caught.exception))
# now delete the delete marker (IsLatest)
resp = self.client.delete_object(Bucket=self.bucket_name,
Key=obj_name,
VersionId=marker_versions[0])
# most recent version is now latest
resp = self.client.get_object(Bucket=self.bucket_name,
Key=obj_name)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.assertEqual('"%s"' % etags[0], resp['ETag'])
# now delete the IsLatest object version
resp = self.client.delete_object(Bucket=self.bucket_name,
Key=obj_name,
VersionId=versions[0])
# and object is deleted again
with self.assertRaises(ClientError) as caught:
resp = self.client.get_object(Bucket=self.bucket_name,
Key=obj_name)
expected_err = 'An error occurred (NoSuchKey) when calling the ' \
'GetObject operation: The specified key does not exist.'
self.assertEqual(expected_err, str(caught.exception))
# delete marker IsLatest
resp = self.client.list_object_versions(Bucket=self.bucket_name)
delete_markers = resp.get('DeleteMarkers', [])
for marker in delete_markers:
marker.pop('LastModified')
marker.pop('Owner')
self.assertEqual([{
'Key': obj_name,
'IsLatest': True,
'VersionId': marker_versions[1],
}], delete_markers)
def test_multipart_upload(self):
obj_name = self.create_name('versioned-obj')
obj_data = b'data'
mu = self.client.create_multipart_upload(
Bucket=self.bucket_name,
Key=obj_name)
part_md5 = self.client.upload_part(
Bucket=self.bucket_name,
Key=obj_name,
UploadId=mu['UploadId'],
PartNumber=1,
Body=obj_data)['ETag']
complete_response = self.client.complete_multipart_upload(
Bucket=self.bucket_name,
Key=obj_name,
UploadId=mu['UploadId'],
MultipartUpload={'Parts': [
{'PartNumber': 1, 'ETag': part_md5},
]})
obj_etag = complete_response['ETag']
delete_response = self.client.delete_object(
Bucket=self.bucket_name,
Key=obj_name)
marker_version_id = delete_response['VersionId']
resp = self.client.list_object_versions(Bucket=self.bucket_name)
objs = resp.get('Versions', [])
versions = []
for obj in objs:
obj.pop('LastModified')
obj.pop('Owner')
versions.append(obj.pop('VersionId'))
self.assertEqual([{
'ETag': obj_etag,
'IsLatest': False,
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}], objs)
markers = resp.get('DeleteMarkers', [])
for marker in markers:
marker.pop('LastModified')
marker.pop('Owner')
self.assertEqual([{
'IsLatest': True,
'Key': obj_name,
'VersionId': marker_version_id,
}], markers)
# Can still get the old version
resp = self.client.get_object(
Bucket=self.bucket_name,
Key=obj_name,
VersionId=versions[0])
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.assertEqual(obj_etag, resp['ETag'])
delete_response = self.client.delete_object(
Bucket=self.bucket_name,
Key=obj_name,
VersionId=versions[0])
resp = self.client.list_object_versions(Bucket=self.bucket_name)
self.assertEqual([], resp.get('Versions', []))
markers = resp.get('DeleteMarkers', [])
for marker in markers:
marker.pop('LastModified')
marker.pop('Owner')
self.assertEqual([{
'IsLatest': True,
'Key': obj_name,
'VersionId': marker_version_id,
}], markers)
def test_get_versioned_object(self):
etags = []
obj_name = self.create_name('versioned-obj')
for i in range(3):
obj_data = self.create_name('some-data-%s' % i).encode('ascii')
# TODO: pull etag from response instead
etags.insert(0, md5(obj_data, usedforsecurity=False).hexdigest())
self.client.upload_fileobj(
six.BytesIO(obj_data), self.bucket_name, obj_name)
resp = self.client.list_object_versions(Bucket=self.bucket_name)
objs = resp.get('Versions', [])
versions = []
for obj in objs:
obj.pop('LastModified')
obj.pop('Owner')
versions.append(obj.pop('VersionId'))
self.assertEqual([{
'ETag': '"%s"' % etags[0],
'IsLatest': True,
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}, {
'ETag': '"%s"' % etags[1],
'IsLatest': False,
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}, {
'ETag': '"%s"' % etags[2],
'IsLatest': False,
'Key': obj_name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
}], objs)
# un-versioned get_object returns the IsLatest version
resp = self.client.get_object(Bucket=self.bucket_name, Key=obj_name)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.assertEqual('"%s"' % etags[0], resp['ETag'])
# but you can get any object by version
for i, version in enumerate(versions):
resp = self.client.get_object(
Bucket=self.bucket_name, Key=obj_name, VersionId=version)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.assertEqual('"%s"' % etags[i], resp['ETag'])
# and head_object works about the same
resp = self.client.head_object(Bucket=self.bucket_name, Key=obj_name)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.assertEqual('"%s"' % etags[0], resp['ETag'])
self.assertEqual(versions[0], resp['VersionId'])
for version, etag in zip(versions, etags):
resp = self.client.head_object(
Bucket=self.bucket_name, Key=obj_name, VersionId=version)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.assertEqual(version, resp['VersionId'])
self.assertEqual('"%s"' % etag, resp['ETag'])
def test_get_versioned_object_invalid_params(self):
with self.assertRaises(ClientError) as ctx:
self.client.list_object_versions(Bucket=self.bucket_name,
KeyMarker='',
VersionIdMarker='bogus')
expected_err = 'An error occurred (InvalidArgument) when calling ' \
'the ListObjectVersions operation: Invalid version id specified'
self.assertEqual(expected_err, str(ctx.exception))
with self.assertRaises(ClientError) as ctx:
self.client.list_object_versions(
Bucket=self.bucket_name,
VersionIdMarker='a' * 32)
expected_err = 'An error occurred (InvalidArgument) when calling ' \
'the ListObjectVersions operation: A version-id marker cannot ' \
'be specified without a key marker.'
self.assertEqual(expected_err, str(ctx.exception))
def test_get_versioned_object_key_marker(self):
obj00_name = self.create_name('00-versioned-obj')
obj01_name = self.create_name('01-versioned-obj')
names = [obj00_name] * 3 + [obj01_name] * 3
latest = [True, False, False, True, False, False]
etags = []
for i in range(3):
obj_data = self.create_name('some-data-%s' % i).encode('ascii')
etags.insert(0, '"%s"' % md5(
obj_data, usedforsecurity=False).hexdigest())
self.client.upload_fileobj(
six.BytesIO(obj_data), self.bucket_name, obj01_name)
for i in range(3):
obj_data = self.create_name('some-data-%s' % i).encode('ascii')
etags.insert(0, '"%s"' % md5(
obj_data, usedforsecurity=False).hexdigest())
self.client.upload_fileobj(
six.BytesIO(obj_data), self.bucket_name, obj00_name)
resp = self.client.list_object_versions(Bucket=self.bucket_name)
versions = []
objs = []
for o in resp.get('Versions', []):
versions.append(o['VersionId'])
objs.append({
'Key': o['Key'],
'VersionId': o['VersionId'],
'IsLatest': o['IsLatest'],
'ETag': o['ETag'],
})
expected = [{
'Key': name,
'VersionId': version,
'IsLatest': is_latest,
'ETag': etag,
} for name, etag, version, is_latest in zip(
names, etags, versions, latest)]
self.assertEqual(expected, objs)
# on s3 this makes expected[0]['IsLatest'] magically change to False?
# resp = self.client.list_object_versions(Bucket=self.bucket_name,
# KeyMarker='',
# VersionIdMarker=versions[0])
# objs = [{
# 'Key': o['Key'],
# 'VersionId': o['VersionId'],
# 'IsLatest': o['IsLatest'],
# 'ETag': o['ETag'],
# } for o in resp.get('Versions', [])]
# self.assertEqual(expected, objs)
# KeyMarker skips past that key
resp = self.client.list_object_versions(Bucket=self.bucket_name,
KeyMarker=obj00_name)
objs = [{
'Key': o['Key'],
'VersionId': o['VersionId'],
'IsLatest': o['IsLatest'],
'ETag': o['ETag'],
} for o in resp.get('Versions', [])]
self.assertEqual(expected[3:], objs)
# KeyMarker with VersionIdMarker skips past that version
resp = self.client.list_object_versions(Bucket=self.bucket_name,
KeyMarker=obj00_name,
VersionIdMarker=versions[0])
objs = [{
'Key': o['Key'],
'VersionId': o['VersionId'],
'IsLatest': o['IsLatest'],
'ETag': o['ETag'],
} for o in resp.get('Versions', [])]
self.assertEqual(expected[1:], objs)
# KeyMarker with bogus version skips past that key
resp = self.client.list_object_versions(
Bucket=self.bucket_name,
KeyMarker=obj00_name,
VersionIdMarker=versions[4])
objs = [{
'Key': o['Key'],
'VersionId': o['VersionId'],
'IsLatest': o['IsLatest'],
'ETag': o['ETag'],
} for o in resp.get('Versions', [])]
self.assertEqual(expected[3:], objs)
def test_list_objects(self):
etags = defaultdict(list)
for i in range(3):
obj_name = self.create_name('versioned-obj')
for i in range(3):
obj_data = self.create_name('some-data-%s' % i).encode('ascii')
etags[obj_name].insert(0, md5(
obj_data, usedforsecurity=False).hexdigest())
self.client.upload_fileobj(
six.BytesIO(obj_data), self.bucket_name, obj_name)
# both unversioned list_objects responses are similar
expected = []
for name, obj_etags in sorted(etags.items()):
expected.append({
'ETag': '"%s"' % obj_etags[0],
'Key': name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
})
resp = self.client.list_objects(Bucket=self.bucket_name)
objs = resp.get('Contents', [])
for obj in objs:
obj.pop('LastModified')
# one difference seems to be the Owner key
self.assertEqual({'DisplayName', 'ID'},
set(obj.pop('Owner').keys()))
self.assertEqual(expected, objs)
resp = self.client.list_objects_v2(Bucket=self.bucket_name)
objs = resp.get('Contents', [])
for obj in objs:
obj.pop('LastModified')
self.assertEqual(expected, objs)
# versioned listings have something for everyone
expected = []
for name, obj_etags in sorted(etags.items()):
is_latest = True
for etag in obj_etags:
expected.append({
'ETag': '"%s"' % etag,
'IsLatest': is_latest,
'Key': name,
'Size': len(obj_data),
'StorageClass': 'STANDARD',
})
is_latest = False
resp = self.client.list_object_versions(Bucket=self.bucket_name)
objs = resp.get('Versions', [])
versions = []
for obj in objs:
obj.pop('LastModified')
obj.pop('Owner')
versions.append(obj.pop('VersionId'))
self.assertEqual(expected, objs)
def test_copy_object(self):
etags = []
obj_name = self.create_name('versioned-obj')
for i in range(3):
obj_data = self.create_name('some-data-%s' % i).encode('ascii')
etags.insert(0, md5(
obj_data, usedforsecurity=False).hexdigest())
self.client.upload_fileobj(
six.BytesIO(obj_data), self.bucket_name, obj_name)
resp = self.client.list_object_versions(Bucket=self.bucket_name)
objs = resp.get('Versions', [])
versions = []
for obj in objs:
versions.append(obj.pop('VersionId'))
# CopySource can just be Bucket/Key string
first_target = self.create_name('target-obj1')
copy_resp = self.client.copy_object(
Bucket=self.bucket_name, Key=first_target,
CopySource='%s/%s' % (self.bucket_name, obj_name))
self.assertEqual(versions[0], copy_resp['CopySourceVersionId'])
# and you'll just get the most recent version
resp = self.client.head_object(Bucket=self.bucket_name,
Key=first_target)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.assertEqual('"%s"' % etags[0], resp['ETag'])
# or you can be more explicit
explicit_target = self.create_name('target-%s' % versions[0])
copy_source = {'Bucket': self.bucket_name, 'Key': obj_name,
'VersionId': versions[0]}
copy_resp = self.client.copy_object(
Bucket=self.bucket_name, Key=explicit_target,
CopySource=copy_source)
self.assertEqual(versions[0], copy_resp['CopySourceVersionId'])
# and you still get the same thing
resp = self.client.head_object(Bucket=self.bucket_name,
Key=explicit_target)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.assertEqual('"%s"' % etags[0], resp['ETag'])
# but you can also copy from a specific version
version_target = self.create_name('target-%s' % versions[2])
copy_source['VersionId'] = versions[2]
copy_resp = self.client.copy_object(
Bucket=self.bucket_name, Key=version_target,
CopySource=copy_source)
self.assertEqual(versions[2], copy_resp['CopySourceVersionId'])
resp = self.client.head_object(Bucket=self.bucket_name,
Key=version_target)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.assertEqual('"%s"' % etags[2], resp['ETag'])
| swift-master | test/s3api/test_versioning.py |
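The versioning tests above all follow the same round trip: enable versioning on the bucket, write the same key several times, then read the stack back through list_object_versions. A condensed sketch of that round trip (client and bucket are assumed to already exist):

# Condensed sketch of the versioning round trip exercised above.
import io

def put_and_list_versions(client, bucket, key, bodies):
    client.put_bucket_versioning(
        Bucket=bucket, VersioningConfiguration={'Status': 'Enabled'})
    for body in bodies:
        client.upload_fileobj(io.BytesIO(body), bucket, key)
    resp = client.list_object_versions(Bucket=bucket)
    # newest first; exactly one entry per key is flagged IsLatest
    return [(v['VersionId'], v['IsLatest']) for v in resp.get('Versions', [])]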
# Copyright (c) 2021 Nvidia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from test.s3api import BaseS3TestCase
from botocore.exceptions import ClientError
class TestMultiPartUploads(BaseS3TestCase):
maxDiff = None
def setUp(self):
self.client = self.get_s3_client(1)
self.bucket_name = self.create_name('test-mpu')
resp = self.client.create_bucket(Bucket=self.bucket_name)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
def tearDown(self):
self.clear_bucket(self.client, self.bucket_name)
super(TestMultiPartUploads, self).tearDown()
def test_basic_upload(self):
key_name = self.create_name('key')
create_mpu_resp = self.client.create_multipart_upload(
Bucket=self.bucket_name, Key=key_name)
self.assertEqual(200, create_mpu_resp[
'ResponseMetadata']['HTTPStatusCode'])
upload_id = create_mpu_resp['UploadId']
parts = []
for i in range(1, 3):
body = ('%d' % i) * 5 * (2 ** 20)
part_resp = self.client.upload_part(
Body=body, Bucket=self.bucket_name, Key=key_name,
PartNumber=i, UploadId=upload_id)
self.assertEqual(200, part_resp[
'ResponseMetadata']['HTTPStatusCode'])
parts.append({
'ETag': part_resp['ETag'],
'PartNumber': i,
})
list_parts_resp = self.client.list_parts(
Bucket=self.bucket_name, Key=key_name,
UploadId=upload_id,
)
self.assertEqual(200, list_parts_resp[
'ResponseMetadata']['HTTPStatusCode'])
self.assertEqual(parts, [{k: p[k] for k in ('ETag', 'PartNumber')}
for p in list_parts_resp['Parts']])
complete_mpu_resp = self.client.complete_multipart_upload(
Bucket=self.bucket_name, Key=key_name,
MultipartUpload={
'Parts': parts,
},
UploadId=upload_id,
)
self.assertEqual(200, complete_mpu_resp[
'ResponseMetadata']['HTTPStatusCode'])
def test_create_list_abort_multipart_uploads(self):
key_name = self.create_name('key')
create_mpu_resp = self.client.create_multipart_upload(
Bucket=self.bucket_name, Key=key_name)
self.assertEqual(200, create_mpu_resp[
'ResponseMetadata']['HTTPStatusCode'])
upload_id = create_mpu_resp['UploadId']
# our upload is in progress
list_mpu_resp = self.client.list_multipart_uploads(
Bucket=self.bucket_name)
self.assertEqual(200, list_mpu_resp[
'ResponseMetadata']['HTTPStatusCode'])
found_uploads = list_mpu_resp.get('Uploads', [])
self.assertEqual(1, len(found_uploads), found_uploads)
self.assertEqual(upload_id, found_uploads[0]['UploadId'])
abort_resp = self.client.abort_multipart_upload(
Bucket=self.bucket_name,
Key=key_name,
UploadId=upload_id,
)
self.assertEqual(204, abort_resp[
'ResponseMetadata']['HTTPStatusCode'])
# no more in-progress uploads
list_mpu_resp = self.client.list_multipart_uploads(
Bucket=self.bucket_name)
self.assertEqual(200, list_mpu_resp[
'ResponseMetadata']['HTTPStatusCode'])
self.assertEqual([], list_mpu_resp.get('Uploads', []))
def test_complete_multipart_upload_malformed_request(self):
key_name = self.create_name('key')
create_mpu_resp = self.client.create_multipart_upload(
Bucket=self.bucket_name, Key=key_name)
self.assertEqual(200, create_mpu_resp[
'ResponseMetadata']['HTTPStatusCode'])
upload_id = create_mpu_resp['UploadId']
parts = []
for i in range(1, 3):
body = ('%d' % i) * 5 * (2 ** 20)
part_resp = self.client.upload_part(
Body=body, Bucket=self.bucket_name, Key=key_name,
PartNumber=i, UploadId=upload_id)
self.assertEqual(200, part_resp[
'ResponseMetadata']['HTTPStatusCode'])
parts.append({
'PartNumber': i,
'ETag': '',
})
with self.assertRaises(ClientError) as caught:
self.client.complete_multipart_upload(
Bucket=self.bucket_name, Key=key_name,
MultipartUpload={
'Parts': parts,
},
UploadId=upload_id,
)
complete_mpu_resp = caught.exception.response
self.assertEqual(400, complete_mpu_resp[
'ResponseMetadata']['HTTPStatusCode'])
self.assertEqual('InvalidPart', complete_mpu_resp[
'Error']['Code'])
self.assertTrue(complete_mpu_resp['Error']['Message'].startswith(
'One or more of the specified parts could not be found.'
), complete_mpu_resp['Error']['Message'])
self.assertEqual(complete_mpu_resp['Error']['UploadId'], upload_id)
self.assertIn(complete_mpu_resp['Error']['PartNumber'], ('1', '2'))
self.assertEqual(complete_mpu_resp['Error']['ETag'], None)
| swift-master | test/s3api/test_mpu.py |
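A note on the part sizes used above: each part body is ('%d' % i) * 5 * (2 ** 20), i.e. 5 MiB of a repeated digit, because S3 rejects any non-final part smaller than 5 MiB (EntityTooSmall) when the upload is completed. A minimal sketch of the same flow as a helper (bucket and key are assumed to exist):

# Minimal MPU flow sketch mirroring the calls used above.
def upload_in_two_parts(client, bucket, key, part_size=5 * 2 ** 20):
    upload_id = client.create_multipart_upload(
        Bucket=bucket, Key=key)['UploadId']
    parts = []
    for num in (1, 2):
        body = (b'%d' % num) * part_size
        etag = client.upload_part(Bucket=bucket, Key=key, UploadId=upload_id,
                                  PartNumber=num, Body=body)['ETag']
        parts.append({'PartNumber': num, 'ETag': etag})
    return client.complete_multipart_upload(
        Bucket=bucket, Key=key, UploadId=upload_id,
        MultipartUpload={'Parts': parts})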
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Swift tests """
from __future__ import print_function
import os
import copy
import logging
import logging.handlers
import sys
from contextlib import contextmanager, closing
from collections import defaultdict
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable # py2
import itertools
from numbers import Number
from tempfile import NamedTemporaryFile
import time
import eventlet
from eventlet import greenpool, debug as eventlet_debug
from eventlet.green import socket
from tempfile import mkdtemp, mkstemp, gettempdir
from shutil import rmtree
import signal
import json
import random
import errno
import xattr
from io import BytesIO
from uuid import uuid4
import six
import six.moves.cPickle as pickle
from six.moves import range
from six.moves.http_client import HTTPException
from swift.common import storage_policy, swob, utils, exceptions
from swift.common.memcached import MemcacheConnectionError
from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy,
VALID_EC_TYPES)
from swift.common.utils import Timestamp, md5
from test import get_config
from test.debug_logger import FakeLogger
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.ring import Ring, RingData, RingBuilder
from swift.obj import server
import functools
from gzip import GzipFile
import mock as mocklib
import inspect
from unittest import SkipTest
EMPTY_ETAG = md5(usedforsecurity=False).hexdigest()
# try not to import this module from swift
if not os.path.basename(sys.argv[0]).startswith('swift'):
# never patch HASH_PATH_SUFFIX AGAIN!
utils.HASH_PATH_SUFFIX = b'endcap'
EC_TYPE_PREFERENCE = [
'liberasurecode_rs_vand',
'jerasure_rs_vand',
]
for eclib_name in EC_TYPE_PREFERENCE:
if eclib_name in VALID_EC_TYPES:
break
else:
raise SystemExit('ERROR: unable to find suitable PyECLib type'
' (none of %r found in %r)' % (
EC_TYPE_PREFERENCE,
VALID_EC_TYPES,
))
DEFAULT_TEST_EC_TYPE = eclib_name
def patch_policies(thing_or_policies=None, legacy_only=False,
with_ec_default=False, fake_ring_args=None):
if isinstance(thing_or_policies, (
Iterable, storage_policy.StoragePolicyCollection)):
return PatchPolicies(thing_or_policies, fake_ring_args=fake_ring_args)
if legacy_only:
default_policies = [
StoragePolicy(0, name='legacy', is_default=True),
]
default_ring_args = [{}]
elif with_ec_default:
default_policies = [
ECStoragePolicy(0, name='ec', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10,
ec_nparity=4, ec_segment_size=4096),
StoragePolicy(1, name='unu'),
]
default_ring_args = [{'replicas': 14}, {}]
else:
default_policies = [
StoragePolicy(0, name='nulo', is_default=True),
StoragePolicy(1, name='unu'),
]
default_ring_args = [{}, {}]
fake_ring_args = fake_ring_args or default_ring_args
decorator = PatchPolicies(default_policies, fake_ring_args=fake_ring_args)
if not thing_or_policies:
return decorator
else:
# it's a thing, we return the wrapped thing instead of the decorator
return decorator(thing_or_policies)
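# Hypothetical usage sketch (not part of the original module): patch_policies
# can wrap a bare callable as well as a TestCase; while the wrapped callable
# runs, storage_policy._POLICIES is swapped for the supplied collection (see
# PatchPolicies.__enter__ below). The policy names here are illustrative only.
def _example_patch_policies_usage():
    @patch_policies([StoragePolicy(0, name='gold', is_default=True),
                     StoragePolicy(1, name='silver')])
    def probe():
        return storage_policy._POLICIES.default.name
    return probe()  # 'gold'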
class PatchPolicies(object):
"""
Why not mock.patch? In my case, when used as a decorator on the class it
seemed to patch setUp at the wrong time (i.e. in setUp the global wasn't
patched yet)
"""
def __init__(self, policies, fake_ring_args=None):
if isinstance(policies, storage_policy.StoragePolicyCollection):
self.policies = policies
else:
self.policies = storage_policy.StoragePolicyCollection(policies)
self.fake_ring_args = fake_ring_args or [None] * len(self.policies)
def _setup_rings(self):
"""
Our tests tend to use the policies rings like their own personal
playground - which can be a problem in the particular case of a
patched TestCase class where the FakeRing objects are scoped in the
call to the patch_policies wrapper outside of the TestCase instance
        which can lead to state bleeding over between tests.
        To help tests get better isolation without having to think about it,
        here we're capturing the args required to *build* new FakeRing
        instances so we can ensure each test method gets a clean ring setup.
The TestCase can always "tweak" these fresh rings in setUp - or if
they'd prefer to get the same "reset" behavior with custom FakeRing's
they can pass in their own fake_ring_args to patch_policies instead of
setting the object_ring on the policy definitions.
"""
for policy, fake_ring_arg in zip(self.policies, self.fake_ring_args):
if fake_ring_arg is not None:
policy.object_ring = FakeRing(**fake_ring_arg)
def __call__(self, thing):
if isinstance(thing, type):
return self._patch_class(thing)
else:
return self._patch_method(thing)
def _patch_class(self, cls):
"""
Creating a new class that inherits from decorated class is the more
common way I've seen class decorators done - but it seems to cause
infinite recursion when super is called from inside methods in the
decorated class.
"""
orig_setUp = cls.setUp
def unpatch_cleanup(cls_self):
if cls_self._policies_patched:
self.__exit__(None, None, None)
cls_self._policies_patched = False
def setUp(cls_self):
if not getattr(cls_self, '_policies_patched', False):
self.__enter__()
cls_self._policies_patched = True
cls_self.addCleanup(unpatch_cleanup, cls_self)
orig_setUp(cls_self)
cls.setUp = setUp
return cls
def _patch_method(self, f):
@functools.wraps(f)
def mywrapper(*args, **kwargs):
with self:
return f(*args, **kwargs)
return mywrapper
def __enter__(self):
self._orig_POLICIES = storage_policy._POLICIES
storage_policy._POLICIES = self.policies
try:
self._setup_rings()
except: # noqa
self.__exit__(None, None, None)
raise
def __exit__(self, *args):
storage_policy._POLICIES = self._orig_POLICIES
class FakeRing(Ring):
def __init__(self, replicas=3, max_more_nodes=0, part_power=0,
base_port=1000, separate_replication=False,
next_part_power=None, reload_time=15):
self.serialized_path = '/foo/bar/object.ring.gz'
self._base_port = base_port
self.max_more_nodes = max_more_nodes
self._part_shift = 32 - part_power
self._init_device_char()
self.separate_replication = separate_replication
# 9 total nodes (6 more past the initial 3) is the cap, no matter if
# this is set higher, or R^2 for R replicas
self.reload_time = reload_time
self.set_replicas(replicas)
self._next_part_power = next_part_power
self._reload()
def has_changed(self):
"""
The real implementation uses getmtime on the serialized_path attribute,
which doesn't exist on our fake and relies on the implementation of
_reload which we override. So ... just NOOPE.
"""
return False
def _reload(self):
self._rtime = time.time()
@property
def device_char(self):
return next(self._device_char_iter)
def _init_device_char(self):
self._device_char_iter = itertools.cycle(
['sd%s' % chr(ord('a') + x) for x in range(26)])
def add_node(self, dev):
# round trip through json to ensure unicode like real rings
self._devs.append(json.loads(json.dumps(dev)))
def set_replicas(self, replicas):
self.replicas = replicas
self._devs = []
self._init_device_char()
for x in range(self.replicas):
ip = '10.0.0.%s' % x
port = self._base_port + x
if self.separate_replication:
repl_ip = '10.0.1.%s' % x
repl_port = port + 100
else:
repl_ip, repl_port = ip, port
dev = {
'ip': ip,
'replication_ip': repl_ip,
'port': port,
'replication_port': repl_port,
'device': self.device_char,
'zone': x % 3,
'region': x % 2,
'id': x,
'weight': 1,
}
self.add_node(dev)
@property
def replica_count(self):
return self.replicas
def _get_part_nodes(self, part):
return [dict(node, index=i) for i, node in enumerate(list(self._devs))]
def get_more_nodes(self, part):
index_counter = itertools.count()
for x in range(self.replicas, (self.replicas + self.max_more_nodes)):
ip = '10.0.0.%s' % x
port = self._base_port + x
if self.separate_replication:
repl_ip = '10.0.1.%s' % x
repl_port = port + 100
else:
repl_ip, repl_port = ip, port
yield {'ip': ip,
'replication_ip': repl_ip,
'port': port,
'replication_port': repl_port,
'device': 'sda',
'zone': x % 3,
'region': x % 2,
'id': x,
'handoff_index': next(index_counter)}
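# Hypothetical usage sketch (not part of the original module): FakeRing hands
# back one node per replica for any partition, plus up to max_more_nodes
# handoff nodes from get_more_nodes(); the counts below follow directly from
# the constructor arguments.
def _example_fake_ring_usage():
    ring = FakeRing(replicas=3, max_more_nodes=2)
    primaries = ring.get_part_nodes(1)
    handoffs = list(ring.get_more_nodes(1))
    return len(primaries), len(handoffs)  # (3, 2)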
def write_fake_ring(path, *devs):
"""
Pretty much just a two node, two replica, 2 part power ring...
"""
dev1 = {'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': 6200}
dev2 = {'id': 1, 'zone': 0, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': 6200}
dev1_updates, dev2_updates = devs or ({}, {})
dev1.update(dev1_updates)
dev2.update(dev2_updates)
replica2part2dev_id = [[0, 1, 0, 1], [1, 0, 1, 0]]
devs = [dev1, dev2]
part_shift = 30
with closing(GzipFile(path, 'wb')) as f:
pickle.dump(RingData(replica2part2dev_id, devs, part_shift), f)
def write_stub_builder(tmpdir, region=1, name=''):
"""
Pretty much just a three node, three replica, 8 part power builder...
:param tmpdir: a place to write the builder, be sure to clean it up!
:param region: an integer, fills in region and ip
:param name: the name of the builder (i.e. <name>.builder)
"""
name = name or str(region)
replicas = 3
builder = RingBuilder(8, replicas, 1)
for i in range(replicas):
dev = {'weight': 100,
'region': '%d' % region,
'zone': '1',
'ip': '10.0.0.%d' % region,
'port': '3600',
'device': 'sdb%d' % i}
builder.add_dev(dev)
builder.rebalance()
builder_file = os.path.join(tmpdir, '%s.builder' % name)
builder.save(builder_file)
return builder, builder_file
class FabricatedRing(Ring):
"""
When a FakeRing just won't do - you can fabricate one to meet
    your tests' needs.
"""
def __init__(self, replicas=6, devices=8, nodes=4, port=6200,
part_power=4):
self.devices = devices
self.nodes = nodes
self.port = port
self.replicas = replicas
self._part_shift = 32 - part_power
self._reload()
def has_changed(self):
return False
def _reload(self, *args, **kwargs):
self._rtime = time.time() * 2
if hasattr(self, '_replica2part2dev_id'):
return
self._devs = [{
'region': 1,
'zone': 1,
'weight': 1.0,
'id': i,
'device': 'sda%d' % i,
'ip': '10.0.0.%d' % (i % self.nodes),
'replication_ip': '10.0.0.%d' % (i % self.nodes),
'port': self.port,
'replication_port': self.port,
} for i in range(self.devices)]
self._replica2part2dev_id = [
[None] * 2 ** self.part_power
for i in range(self.replicas)
]
dev_ids = itertools.cycle(range(self.devices))
for p in range(2 ** self.part_power):
for r in range(self.replicas):
self._replica2part2dev_id[r][p] = next(dev_ids)
self._update_bookkeeping()
def track(f):
def wrapper(self, *a, **kw):
self.calls.append(getattr(mocklib.call, f.__name__)(*a, **kw))
return f(self, *a, **kw)
return wrapper
class FakeMemcache(object):
def __init__(self, error_on_set=None, error_on_get=None):
self.store = {}
self.calls = []
self.error_on_incr = False
self.error_on_get = error_on_get or []
self.error_on_set = error_on_set or []
self.init_incr_return_neg = False
def clear_calls(self):
del self.calls[:]
@track
def get(self, key, raise_on_error=False):
if self.error_on_get and self.error_on_get.pop(0):
if raise_on_error:
raise MemcacheConnectionError()
return self.store.get(key)
@property
def keys(self):
return self.store.keys
@track
def set(self, key, value, serialize=True, time=0, raise_on_error=False):
if self.error_on_set and self.error_on_set.pop(0):
if raise_on_error:
raise MemcacheConnectionError()
if serialize:
value = json.loads(json.dumps(value))
else:
assert isinstance(value, (str, bytes))
self.store[key] = value
return True
@track
def incr(self, key, delta=1, time=0):
if self.error_on_incr:
raise MemcacheConnectionError('Memcache restarting')
if self.init_incr_return_neg:
# simulate initial hit, force reset of memcache
self.init_incr_return_neg = False
return -10000000
self.store[key] = int(self.store.setdefault(key, 0)) + delta
if self.store[key] < 0:
self.store[key] = 0
return self.store[key]
# tracked via incr()
def decr(self, key, delta=1, time=0):
return self.incr(key, delta=-delta, time=time)
@track
def delete(self, key):
try:
del self.store[key]
except Exception:
pass
return True
def delete_all(self):
self.store.clear()
# This decorator only makes sense in the context of FakeMemcache;
# may as well clean it up now
del track
class FakeIterable(object):
def __init__(self, values):
self.next_call_count = 0
self.close_call_count = 0
self.values = iter(values)
def __iter__(self):
return self
def __next__(self):
self.next_call_count += 1
return next(self.values)
next = __next__ # py2
def close(self):
self.close_call_count += 1
def readuntil2crlfs(fd):
rv = b''
lc = b''
crlfs = 0
while crlfs < 2:
c = fd.read(1)
if not c:
raise ValueError("didn't get two CRLFs; just got %r" % rv)
rv = rv + c
if c == b'\r' and lc != b'\n':
crlfs = 0
if lc == b'\r' and c == b'\n':
crlfs += 1
lc = c
return rv
def readlength(fd, size, timeout=1.0):
buf = b''
with eventlet.Timeout(timeout):
while len(buf) < size:
chunk = fd.read(min(64, size - len(buf)))
buf += chunk
if len(buf) >= size:
break
return buf
def connect_tcp(hostport):
rv = socket.socket()
rv.connect(hostport)
return rv
@contextmanager
def tmpfile(content):
with NamedTemporaryFile('w', delete=False) as f:
file_name = f.name
f.write(str(content))
try:
yield file_name
finally:
os.unlink(file_name)
@contextmanager
def temptree(files, contents=''):
# generate enough contents to fill the files
c = len(files)
contents = (list(contents) + [''] * c)[:c]
tempdir = mkdtemp()
for path, content in zip(files, contents):
if os.path.isabs(path):
path = '.' + path
new_path = os.path.join(tempdir, path)
subdir = os.path.dirname(new_path)
if not os.path.exists(subdir):
os.makedirs(subdir)
with open(new_path, 'w') as f:
f.write(str(content))
try:
yield tempdir
finally:
rmtree(tempdir)
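# Hypothetical usage sketch (not part of the original module): temptree lays
# the named files out under a fresh directory (creating subdirectories as
# needed) and removes the whole tree on exit; the file names are illustrative.
def _example_temptree_usage():
    with temptree(['a/b.conf', 'c.conf'], contents=['[DEFAULT]', '']) as t:
        return sorted(os.listdir(t))  # ['a', 'c.conf']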
def with_tempdir(f):
"""
    Decorator to give a single test method a tempdir as its last argument.
"""
@functools.wraps(f)
def wrapped(*args, **kwargs):
tempdir = mkdtemp()
args = list(args)
args.append(tempdir)
try:
return f(*args, **kwargs)
finally:
rmtree(tempdir)
return wrapped
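# Hypothetical usage sketch (not part of the original module): with_tempdir
# appends a fresh temporary directory as the final positional argument of the
# wrapped callable and removes it afterwards; probe() is illustrative only.
def _example_with_tempdir_usage():
    @with_tempdir
    def probe(tempdir):
        return os.path.isdir(tempdir)
    return probe()  # True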
class NullLoggingHandler(logging.Handler):
def emit(self, record):
pass
class UnmockTimeModule(object):
"""
    Even if a test mocks time.time - you can restore unmolested behavior in
    another module that imports time directly by monkey patching its imported
    reference to the module with an instance of this class
"""
_orig_time = time.time
def __getattribute__(self, name):
if name == 'time':
return UnmockTimeModule._orig_time
return getattr(time, name)
# logging.LogRecord.__init__ calls time.time
logging.time = UnmockTimeModule()
original_syslog_handler = logging.handlers.SysLogHandler
def fake_syslog_handler():
for attr in dir(original_syslog_handler):
if attr.startswith('LOG'):
setattr(FakeLogger, attr,
copy.copy(getattr(logging.handlers.SysLogHandler, attr)))
FakeLogger.priority_map = \
copy.deepcopy(logging.handlers.SysLogHandler.priority_map)
logging.handlers.SysLogHandler = FakeLogger
if utils.config_true_value(
get_config('unit_test').get('fake_syslog', 'False')):
fake_syslog_handler()
@contextmanager
def quiet_eventlet_exceptions():
orig_state = greenpool.DEBUG
eventlet_debug.hub_exceptions(False)
try:
yield
finally:
eventlet_debug.hub_exceptions(orig_state)
@contextmanager
def mock_check_drive(isdir=False, ismount=False):
"""
All device/drive/mount checking should be done through the constraints
module. If we keep the mocking consistently within that module, we can
keep our tests robust to further rework on that interface.
Replace the constraint modules underlying os calls with mocks.
:param isdir: return value of constraints isdir calls, default False
:param ismount: return value of constraints ismount calls, default False
:returns: a dict of constraint module mocks
"""
mock_base = 'swift.common.constraints.'
with mocklib.patch(mock_base + 'isdir') as mock_isdir, \
mocklib.patch(mock_base + 'utils.ismount') as mock_ismount:
mock_isdir.return_value = isdir
mock_ismount.return_value = ismount
yield {
'isdir': mock_isdir,
'ismount': mock_ismount,
}
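# Hypothetical usage sketch (not part of the original module): code under test
# that routes its device checks through swift.common.constraints sees the
# mocked isdir/ismount values; the yielded dict exposes the mocks so tests can
# make assertions about the calls that were made.
def _example_mock_check_drive_usage():
    with mock_check_drive(ismount=True) as mocks:
        # exercise code under test here, then e.g.:
        # mocks['ismount'].assert_called()
        return mocks['ismount'].return_value  # True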
@contextmanager
def mock(update):
returns = []
deletes = []
for key, value in update.items():
imports = key.split('.')
attr = imports.pop(-1)
module = __import__(imports[0], fromlist=imports[1:])
for modname in imports[1:]:
module = getattr(module, modname)
if hasattr(module, attr):
returns.append((module, attr, getattr(module, attr)))
else:
deletes.append((module, attr))
setattr(module, attr, value)
try:
yield True
finally:
for module, attr, value in returns:
setattr(module, attr, value)
for module, attr in deletes:
delattr(module, attr)
class FakeStatus(object):
"""
This will work with our fake_http_connect, if you hand in one of these
instead of a status int or status int tuple to the "codes" iter you can
add some eventlet sleep to the expect and response stages of the
connection.
"""
def __init__(self, status, expect_sleep=None, response_sleep=None):
"""
:param status: the response status int, or a tuple of
([expect_status, ...], response_status)
        :param expect_sleep: float, time to eventlet sleep during expect, can
                             be an iter of floats
:param response_sleep: float, time to eventlet sleep during response
"""
# connect exception
if inspect.isclass(status) and issubclass(status, Exception):
raise status('FakeStatus Error')
if isinstance(status, (Exception, eventlet.Timeout)):
raise status
if isinstance(status, tuple):
self.expect_status = list(status[:-1])
self.status = status[-1]
self.explicit_expect_list = True
else:
self.expect_status, self.status = ([], status)
self.explicit_expect_list = False
if not self.expect_status:
# when a swift backend service returns a status before reading
# from the body (mostly an error response) eventlet.wsgi will
# respond with that status line immediately instead of 100
# Continue, even if the client sent the Expect 100 header.
# BufferedHttp and the proxy both see these error statuses
# when they call getexpect, so our FakeConn tries to act like
# our backend services and return certain types of responses
# as expect statuses just like a real backend server would do.
if self.status in (507, 412, 409):
self.expect_status = [status]
else:
self.expect_status = [100, 100]
# setup sleep attributes
if not isinstance(expect_sleep, (list, tuple)):
expect_sleep = [expect_sleep] * len(self.expect_status)
self.expect_sleep_list = list(expect_sleep)
while len(self.expect_sleep_list) < len(self.expect_status):
self.expect_sleep_list.append(None)
self.response_sleep = response_sleep
def __repr__(self):
return '%s(%s, expect_status=%r, response_sleep=%s)' % (
self.__class__.__name__, self.status,
self.expect_status, self.response_sleep)
def get_response_status(self):
if self.response_sleep is not None:
eventlet.sleep(self.response_sleep)
if self.expect_status and self.explicit_expect_list:
raise Exception('Test did not consume all fake '
'expect status: %r' % (self.expect_status,))
if isinstance(self.status, (Exception, eventlet.Timeout)):
raise self.status
return self.status
def get_expect_status(self):
expect_sleep = self.expect_sleep_list.pop(0)
if expect_sleep is not None:
eventlet.sleep(expect_sleep)
expect_status = self.expect_status.pop(0)
if isinstance(expect_status, (Exception, eventlet.Timeout)):
raise expect_status
return expect_status
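# Hypothetical usage sketch (not part of the original module): FakeStatus
# values can be handed to fake_http_connect / mocked_http_conn in place of
# plain ints to slow down or split the expect/response phases; the values
# below are illustrative only.
def _example_fake_status_codes():
    return [
        FakeStatus(201, response_sleep=0.5),       # delay the final response
        FakeStatus((100, 507), expect_sleep=0.1),  # expect 100, then 507
    ]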
class SlowBody(object):
"""
This will work with our fake_http_connect, if you hand in these
instead of strings it will make reads take longer by the given
amount. It should be a little bit easier to extend than the
current slow kwarg - which inserts whitespace in the response.
Also it should be easy to detect if you have one of these (or a
subclass) for the body inside of FakeConn if we wanted to do
something smarter than just duck-type the str/buffer api
enough to get by.
"""
def __init__(self, body, slowness):
self.body = body
self.slowness = slowness
def slowdown(self):
eventlet.sleep(self.slowness)
def __getitem__(self, s):
return SlowBody(self.body[s], self.slowness)
def __len__(self):
return len(self.body)
def __radd__(self, other):
self.slowdown()
return other + self.body
def fake_http_connect(*code_iter, **kwargs):
class FakeConn(object):
SLOW_READS = 4
SLOW_WRITES = 4
def __init__(self, status, etag=None, body=b'', timestamp=-1,
headers=None, expect_headers=None, connection_id=None,
give_send=None, give_expect=None):
if not isinstance(status, FakeStatus):
status = FakeStatus(status)
self._status = status
self.reason = 'Fake'
self.host = '1.2.3.4'
self.port = '1234'
self.sent = 0
self.received = 0
self.etag = etag
self.body = body
self.headers = headers or {}
self.expect_headers = expect_headers or {}
if timestamp == -1:
# -1 is reserved to mean "magic default"
if status.status != 404:
self.timestamp = '1'
else:
self.timestamp = '0'
else:
# tests may specify int, string, Timestamp or None
self.timestamp = timestamp
self.connection_id = connection_id
self.give_send = give_send
self.give_expect = give_expect
self.closed = False
if 'slow' in kwargs and isinstance(kwargs['slow'], list):
try:
self._next_sleep = kwargs['slow'].pop(0)
except IndexError:
self._next_sleep = None
# if we're going to be slow, we need a body to send slowly
am_slow, _junk = self.get_slow()
if am_slow and len(self.body) < self.SLOW_READS:
self.body += b" " * (self.SLOW_READS - len(self.body))
# be nice to trixy bits with node_iter's
eventlet.sleep()
def getresponse(self):
exc = kwargs.get('raise_exc')
if exc:
if isinstance(exc, (Exception, eventlet.Timeout)):
raise exc
raise Exception('test')
if kwargs.get('raise_timeout_exc'):
raise eventlet.Timeout()
self.status = self._status.get_response_status()
return self
def getexpect(self):
if self.give_expect:
self.give_expect(self)
expect_status = self._status.get_expect_status()
headers = dict(self.expect_headers)
if expect_status == 409:
headers['X-Backend-Timestamp'] = self.timestamp
response = FakeConn(expect_status,
timestamp=self.timestamp,
headers=headers)
response.status = expect_status
return response
def getheaders(self):
etag = self.etag
if not etag:
if isinstance(self.body, bytes):
etag = ('"' + md5(
self.body, usedforsecurity=False).hexdigest() + '"')
else:
etag = '"68b329da9893e34099c7d8ad5cb9c940"'
am_slow, _junk = self.get_slow()
headers = HeaderKeyDict({
'content-length': len(self.body),
'content-type': 'x-application/test',
'x-timestamp': self.timestamp,
'x-backend-timestamp': self.timestamp,
'last-modified': self.timestamp,
'x-object-meta-test': 'testing',
'x-delete-at': '9876543210',
'etag': etag,
'x-works': 'yes',
})
if self.status // 100 == 2:
headers['x-account-container-count'] = \
kwargs.get('count', 12345)
if not self.timestamp:
# when timestamp is None, HeaderKeyDict raises KeyError
headers.pop('x-timestamp', None)
try:
if next(container_ts_iter) is False:
headers['x-container-timestamp'] = '1'
except StopIteration:
pass
headers.update(self.headers)
return headers.items()
def get_slow(self):
if 'slow' in kwargs and isinstance(kwargs['slow'], list):
if self._next_sleep is not None:
return True, self._next_sleep
else:
return False, 0.01
if kwargs.get('slow') and isinstance(kwargs['slow'], Number):
return True, kwargs['slow']
return bool(kwargs.get('slow')), 0.1
def read(self, amt=None):
am_slow, value = self.get_slow()
if am_slow:
if self.sent < self.SLOW_READS:
slowly_read_byte = self.body[self.sent:self.sent + 1]
self.sent += 1
eventlet.sleep(value)
return slowly_read_byte
if amt is None:
rv = self.body[self.sent:]
else:
rv = self.body[self.sent:self.sent + amt]
self.sent += len(rv)
return rv
def send(self, data=None):
if self.give_send:
self.give_send(self, data)
am_slow, value = self.get_slow()
if am_slow:
if self.received < self.SLOW_WRITES:
self.received += 1
eventlet.sleep(value)
def getheader(self, name, default=None):
return HeaderKeyDict(self.getheaders()).get(name, default)
def nuke_from_orbit(self):
# wrapped connections from buffered_http have this helper
self.close()
def close(self):
self.closed = True
# unless tests provide timestamps we use the "magic default"
timestamps_iter = iter(kwargs.get('timestamps') or [-1] * len(code_iter))
etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
if isinstance(kwargs.get('headers'), (list, tuple)):
headers_iter = iter(kwargs['headers'])
else:
headers_iter = iter([kwargs.get('headers', {})] * len(code_iter))
if isinstance(kwargs.get('expect_headers'), (list, tuple)):
expect_headers_iter = iter(kwargs['expect_headers'])
else:
expect_headers_iter = iter([kwargs.get('expect_headers', {})] *
len(code_iter))
x = kwargs.get('missing_container', [False] * len(code_iter))
if not isinstance(x, (tuple, list)):
x = [x] * len(code_iter)
container_ts_iter = iter(x)
code_iter = iter(code_iter)
conn_id_and_code_iter = enumerate(code_iter)
static_body = kwargs.get('body', None)
body_iter = kwargs.get('body_iter', None)
if body_iter:
body_iter = iter(body_iter)
unexpected_requests = []
def connect(*args, **ckwargs):
if kwargs.get('slow_connect', False):
eventlet.sleep(0.1)
if 'give_content_type' in kwargs:
if len(args) >= 7 and 'Content-Type' in args[6]:
kwargs['give_content_type'](args[6]['Content-Type'])
else:
kwargs['give_content_type']('')
try:
i, status = next(conn_id_and_code_iter)
except StopIteration:
# the code under test may swallow the StopIteration, so by logging
# unexpected requests here we allow the test framework to check for
# them after the connect function has been used.
unexpected_requests.append((args, ckwargs))
raise
if 'give_connect' in kwargs:
give_conn_fn = kwargs['give_connect']
if six.PY2:
argspec = inspect.getargspec(give_conn_fn)
if argspec.keywords or 'connection_id' in argspec.args:
ckwargs['connection_id'] = i
else:
argspec = inspect.getfullargspec(give_conn_fn)
if argspec.varkw or 'connection_id' in argspec.args:
ckwargs['connection_id'] = i
give_conn_fn(*args, **ckwargs)
etag = next(etag_iter)
headers = next(headers_iter)
expect_headers = next(expect_headers_iter)
timestamp = next(timestamps_iter)
if isinstance(status, int) and status <= 0:
raise HTTPException()
if body_iter is None:
body = static_body or b''
else:
body = next(body_iter)
conn = FakeConn(status, etag, body=body, timestamp=timestamp,
headers=headers, expect_headers=expect_headers,
connection_id=i, give_send=kwargs.get('give_send'),
give_expect=kwargs.get('give_expect'))
if 'capture_connections' in kwargs:
kwargs['capture_connections'].append(conn)
return conn
connect.unexpected_requests = unexpected_requests
connect.code_iter = code_iter
return connect
@contextmanager
def mocked_http_conn(*args, **kwargs):
requests = []
responses = []
def capture_requests(ip, port, method, path, headers, qs, ssl):
if six.PY2 and not isinstance(ip, bytes):
ip = ip.encode('ascii')
req = {
'ip': ip,
'port': port,
'method': method,
'path': path,
'headers': headers,
'qs': qs,
'ssl': ssl,
}
requests.append(req)
kwargs.setdefault('give_connect', capture_requests)
kwargs['capture_connections'] = responses
fake_conn = fake_http_connect(*args, **kwargs)
fake_conn.requests = requests
fake_conn.responses = responses
with mocklib.patch('swift.common.bufferedhttp.http_connect_raw',
new=fake_conn):
yield fake_conn
left_over_status = list(fake_conn.code_iter)
if left_over_status:
raise AssertionError('left over status %r' % left_over_status)
if fake_conn.unexpected_requests:
raise AssertionError('unexpected requests:\n%s' % '\n '.join(
'%r' % (req,) for req in fake_conn.unexpected_requests))
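# Hypothetical usage sketch (not part of the original module): mocked_http_conn
# patches bufferedhttp.http_connect_raw, so each backend request made inside
# the context consumes one status from the supplied codes and is captured in
# fake_conn.requests; unconsumed or extra requests raise on exit. The IPs and
# path below are illustrative only.
def _example_mocked_http_conn_usage():
    from swift.common import bufferedhttp
    statuses = []
    with mocked_http_conn(200, 404) as fake_conn:
        for ip in ('10.0.0.1', '10.0.0.2'):
            conn = bufferedhttp.http_connect_raw(
                ip, 6200, 'GET', '/sda1/0/a/c/o', {}, '', False)
            statuses.append(conn.getresponse().status)
    return statuses, [req['ip'] for req in fake_conn.requests]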
def make_timestamp_iter(offset=0):
return iter(Timestamp(t)
for t in itertools.count(int(time.time()) + offset))
@contextmanager
def mock_timestamp_now(now=None, klass=Timestamp):
if now is None:
now = klass.now()
with mocklib.patch('swift.common.utils.Timestamp.now',
classmethod(lambda c: now)):
yield now
@contextmanager
def mock_timestamp_now_with_iter(ts_iter):
with mocklib.patch('swift.common.utils.Timestamp.now',
side_effect=ts_iter):
yield
class Timeout(object):
def __init__(self, seconds):
self.seconds = seconds
def __enter__(self):
signal.signal(signal.SIGALRM, self._exit)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
signal.alarm(0)
def _exit(self, signum, frame):
class TimeoutException(Exception):
pass
raise TimeoutException
def requires_o_tmpfile_support_in_tmp(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not utils.o_tmpfile_in_tmpdir_supported():
raise SkipTest('Requires O_TMPFILE support in TMPDIR')
return func(*args, **kwargs)
return wrapper
class StubResponse(object):
def __init__(self, status, body=b'', headers=None, frag_index=None,
slowdown=None):
self.status = status
self.body = body
self.readable = BytesIO(body)
try:
self._slowdown = iter(slowdown)
except TypeError:
self._slowdown = iter([slowdown])
self.headers = HeaderKeyDict(headers)
if frag_index is not None:
self.headers['X-Object-Sysmeta-Ec-Frag-Index'] = frag_index
fake_reason = ('Fake', 'This response is a lie.')
self.reason = swob.RESPONSE_REASONS.get(status, fake_reason)[0]
def slowdown(self):
try:
wait = next(self._slowdown)
except StopIteration:
wait = None
if wait is not None:
eventlet.sleep(wait)
def nuke_from_orbit(self):
if hasattr(self, 'swift_conn'):
self.swift_conn.close()
def getheader(self, header_name, default=None):
return self.headers.get(header_name, default)
def getheaders(self):
if 'Content-Length' not in self.headers:
self.headers['Content-Length'] = len(self.body)
return self.headers.items()
def read(self, amt=0):
self.slowdown()
return self.readable.read(amt)
def readline(self, size=-1):
self.slowdown()
return self.readable.readline(size)
def __repr__(self):
info = ['Status: %s' % self.status]
if self.headers:
info.append('Headers: %r' % dict(self.headers))
if self.body:
info.append('Body: %r' % self.body)
return '<StubResponse %s>' % ', '.join(info)
def encode_frag_archive_bodies(policy, body):
"""
Given a stub body produce a list of complete frag_archive bodies as
strings in frag_index order.
:param policy: a StoragePolicy instance, with policy_type EC_POLICY
:param body: a string, the body to encode into frag archives
:returns: list of strings, the complete frag_archive bodies for the given
plaintext
"""
segment_size = policy.ec_segment_size
# split up the body into buffers
chunks = [body[x:x + segment_size]
for x in range(0, len(body), segment_size)]
# encode the buffers into fragment payloads
fragment_payloads = []
for chunk in chunks:
fragments = policy.pyeclib_driver.encode(chunk) \
* policy.ec_duplication_factor
if not fragments:
break
fragment_payloads.append(fragments)
# join up the fragment payloads per node
ec_archive_bodies = [b''.join(frags)
for frags in zip(*fragment_payloads)]
return ec_archive_bodies
def make_ec_object_stub(test_body, policy, timestamp):
segment_size = policy.ec_segment_size
test_body = test_body or (
b'test' * segment_size)[:-random.randint(1, 1000)]
timestamp = timestamp or utils.Timestamp.now()
etag = md5(test_body, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(policy, test_body)
return {
'body': test_body,
'etag': etag,
'frags': ec_archive_bodies,
'timestamp': timestamp
}
def fake_ec_node_response(node_frags, policy):
"""
Given a list of entries for each node in ring order, where the entries
are a dict (or list of dicts) which describes the fragment (or
fragments) that are on the node; create a function suitable for use
with capture_http_requests that will accept a req object and return a
    response that will suitably fake the behavior of an object server that
    had the given fragments on disk at the time.
:param node_frags: a list. Each item in the list describes the
fragments that are on a node; each item is a dict or list of dicts,
each dict describing a single fragment; where the item is a list,
repeated calls to get_response will return fragments in the order
of the list; each dict has keys:
        - obj: an object stub, as generated by make_ec_object_stub,
that defines all of the fragments that compose an object
at a specific timestamp.
- frag: the index of a fragment to be selected from the object
stub
- durable (optional): True if the selected fragment is durable
:param policy: storage policy to return
"""
node_map = {} # maps node ip and port to node index
all_nodes = []
call_count = {} # maps node index to get_response call count for node
def _build_node_map(req, policy):
part = utils.split_path(req['path'], 5, 5, True)[1]
all_nodes.extend(policy.object_ring.get_part_nodes(part))
all_nodes.extend(policy.object_ring.get_more_nodes(part))
for i, node in enumerate(all_nodes):
node_map[(node['ip'], node['port'])] = i
call_count[i] = 0
# normalize node_frags to a list of fragments for each node even
# if there's only one fragment in the dataset provided.
for i, frags in enumerate(node_frags):
if isinstance(frags, dict):
node_frags[i] = [frags]
def get_response(req):
requested_policy = int(
req['headers']['X-Backend-Storage-Policy-Index'])
if int(policy) != requested_policy:
            raise AssertionError(
                "Requested policy doesn't fit the fake response policy")
if not node_map:
_build_node_map(req, policy)
try:
node_index = node_map[(req['ip'], req['port'])]
except KeyError:
raise Exception("Couldn't find node %s:%s in %r" % (
req['ip'], req['port'], all_nodes))
try:
frags = node_frags[node_index]
except IndexError:
raise Exception('Found node %r:%r at index %s - '
'but only got %s stub response nodes' % (
req['ip'], req['port'], node_index,
len(node_frags)))
if not frags:
return StubResponse(404)
# determine response fragment (if any) for this call
resp_frag = frags[call_count[node_index]]
call_count[node_index] += 1
frag_prefs = req['headers'].get('X-Backend-Fragment-Preferences')
if not (frag_prefs or resp_frag.get('durable', True)):
return StubResponse(404)
# prepare durable timestamp and backend frags header for this node
obj_stub = resp_frag['obj']
ts2frags = defaultdict(list)
durable_timestamp = None
for frag in frags:
ts_frag = frag['obj']['timestamp']
if frag.get('durable', True):
durable_timestamp = ts_frag.internal
ts2frags[ts_frag].append(frag['frag'])
try:
body = obj_stub['frags'][resp_frag['frag']]
except IndexError as err:
raise Exception(
'Frag index %s not defined: node index %s, frags %r\n%s' %
(resp_frag['frag'], node_index, [f['frag'] for f in frags],
err))
headers = {
'X-Object-Sysmeta-Ec-Content-Length': len(obj_stub['body']),
'X-Object-Sysmeta-Ec-Etag': obj_stub['etag'],
'X-Object-Sysmeta-Ec-Frag-Index':
policy.get_backend_index(resp_frag['frag']),
'X-Backend-Timestamp': obj_stub['timestamp'].internal,
'X-Timestamp': obj_stub['timestamp'].normal,
'X-Backend-Data-Timestamp': obj_stub['timestamp'].internal,
'X-Backend-Fragments':
server._make_backend_fragments_header(ts2frags)
}
if durable_timestamp:
headers['X-Backend-Durable-Timestamp'] = durable_timestamp
return StubResponse(200, body, headers)
return get_response
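# Hypothetical usage sketch (not part of the original module): `policy` is
# assumed to be an ECStoragePolicy whose object_ring is a FakeRing. The first
# two primaries hold durable fragments 0 and 1 of the same object and the
# third node has nothing on disk, so it answers 404.
def _example_fake_ec_node_response_usage(policy):
    obj = make_ec_object_stub(b'test body', policy, None)
    node_frags = [
        [{'obj': obj, 'frag': 0, 'durable': True}],
        [{'obj': obj, 'frag': 1, 'durable': True}],
        [],
    ]
    return fake_ec_node_response(node_frags, policy)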
supports_xattr_cached_val = None
def xattr_supported_check():
"""
This check simply sets more than 4k of metadata on a tempfile and
returns True if it worked and False if not.
We want to use *more* than 4k of metadata in this check because
some filesystems (eg ext4) only allow one blocksize worth of
metadata. The XFS filesystem doesn't have this limit, and so this
check returns True when TMPDIR is XFS. This check will return
False under ext4 (which supports xattrs <= 4k) and tmpfs (which
doesn't support xattrs at all).
"""
global supports_xattr_cached_val
if supports_xattr_cached_val is not None:
return supports_xattr_cached_val
# assume the worst -- xattrs aren't supported
supports_xattr_cached_val = False
big_val = b'x' * (4096 + 1) # more than 4k of metadata
try:
fd, tmppath = mkstemp()
xattr.setxattr(fd, 'user.swift.testing_key', big_val)
except IOError as e:
if errno.errorcode.get(e.errno) in ('ENOSPC', 'ENOTSUP', 'EOPNOTSUPP',
'ERANGE'):
# filesystem does not support xattr of this size
return False
raise
else:
supports_xattr_cached_val = True
return True
finally:
# clean up the tmpfile
os.close(fd)
os.unlink(tmppath)
def skip_if_no_xattrs():
if not xattr_supported_check():
raise SkipTest('Large xattrs not supported in `%s`. Skipping test' %
gettempdir())
def unlink_files(paths):
for path in paths:
try:
os.unlink(path)
except OSError as err:
if err.errno != errno.ENOENT:
raise
class FakeHTTPResponse(object):
def __init__(self, resp):
self.resp = resp
@property
def status(self):
return self.resp.status_int
@property
def data(self):
return self.resp.body
def attach_fake_replication_rpc(rpc, replicate_hook=None, errors=None):
class FakeReplConnection(object):
def __init__(self, node, partition, hash_, logger):
self.logger = logger
self.node = node
self.partition = partition
self.path = '/%s/%s/%s' % (node['device'], partition, hash_)
self.host = node['replication_ip']
def replicate(self, op, *sync_args):
print('REPLICATE: %s, %s, %r' % (self.path, op, sync_args))
resp = None
if errors and op in errors and errors[op]:
resp = errors[op].pop(0)
if not resp:
replicate_args = self.path.lstrip('/').split('/')
args = [op] + copy.deepcopy(list(sync_args))
with mock_check_drive(isdir=not rpc.mount_check,
ismount=rpc.mount_check):
swob_response = rpc.dispatch(replicate_args, args)
resp = FakeHTTPResponse(swob_response)
if replicate_hook:
replicate_hook(op, *sync_args)
return resp
return FakeReplConnection
def group_by_byte(contents):
# This looks a little funny, but iterating through a byte string on py3
# yields a sequence of ints, not a sequence of single-byte byte strings
# as it did on py2.
byte_iter = (contents[i:i + 1] for i in range(len(contents)))
return [
(char, sum(1 for _ in grp))
for char, grp in itertools.groupby(byte_iter)]
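# Hypothetical usage sketch (not part of the original module): the helper
# behaves identically on py2 and py3, e.g.
#     group_by_byte(b'aaabbc') == [(b'a', 3), (b'b', 2), (b'c', 1)]
def _example_group_by_byte_usage():
    return group_by_byte(b'aaabbc')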
def generate_db_path(tempdir, server_type):
return os.path.join(
tempdir, '%ss' % server_type, 'part', 'suffix', 'hash',
'%s-%s.db' % (server_type, uuid4()))
class ConfigAssertMixin(object):
"""
Use this with a TestCase to get py2/3 compatible assert for DuplicateOption
"""
def assertDuplicateOption(self, app_config, option_name, option_value):
"""
PY3 added a DuplicateOptionError, PY2 didn't seem to care
"""
if six.PY3:
self.assertDuplicateOptionError(app_config, option_name)
else:
self.assertDuplicateOptionOK(app_config, option_name, option_value)
def assertDuplicateOptionError(self, app_config, option_name):
with self.assertRaises(
utils.configparser.DuplicateOptionError) as ctx:
app_config()
msg = str(ctx.exception)
self.assertIn(option_name, msg)
self.assertIn('already exists', msg)
def assertDuplicateOptionOK(self, app_config, option_name, option_value):
app = app_config()
if hasattr(app, 'conf'):
found_value = app.conf[option_name]
else:
if hasattr(app, '_pipeline_final_app'):
# special case for proxy app!
app = app._pipeline_final_app
found_value = getattr(app, option_name)
self.assertEqual(found_value, option_value)
class FakeSource(object):
def __init__(self, chunks, headers=None, body=b''):
self.chunks = list(chunks)
self.headers = headers or {}
self.status = 200
self.swift_conn = None
self.body = body
def read(self, _read_size):
if self.chunks:
chunk = self.chunks.pop(0)
if chunk is None:
raise exceptions.ChunkReadTimeout()
else:
return chunk
else:
return self.body
def getheader(self, header):
# content-length for the whole object is generated dynamically
# by summing non-None chunks
if header.lower() == "content-length":
if self.chunks:
return str(sum(len(c) for c in self.chunks
if c is not None))
return len(self.read(-1))
return self.headers.get(header.lower())
def getheaders(self):
return [('content-length', self.getheader('content-length'))] + \
[(k, v) for k, v in self.headers.items()]
| swift-master | test/unit/__init__.py |
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides helper functions for unit tests.
This cannot be in test/unit/__init__.py because that module is imported by the
py34 unit test job and there are imports here that end up importing modules
that are not yet ported to py34, such as wsgi.py which imports mimetools.
"""
import os
from contextlib import closing
from gzip import GzipFile
from tempfile import mkdtemp
import time
import warnings
from eventlet import spawn, wsgi
import mock
from shutil import rmtree
import six.moves.cPickle as pickle
import swift
from swift.account import server as account_server
from swift.common import storage_policy
from swift.common.ring import RingData
from swift.common.storage_policy import StoragePolicy, ECStoragePolicy
from swift.common.middleware import listing_formats, proxy_logging
from swift.common import utils
from swift.common.utils import mkdirs, normalize_timestamp, NullLogger
from swift.common.http_protocol import SwiftHttpProtocol
from swift.container import server as container_server
from swift.obj import server as object_server
from swift.proxy import server as proxy_server
import swift.proxy.controllers.obj
from test import listen_zero
from test.debug_logger import debug_logger
from test.unit import write_fake_ring, DEFAULT_TEST_EC_TYPE, connect_tcp, \
readuntil2crlfs
def setup_servers(the_object_server=object_server, extra_conf=None):
"""
Setup proxy, account, container and object servers using a set of fake
rings and policies.
:param the_object_server: The object server module to use (optional,
defaults to swift.obj.server)
:param extra_conf: A dict of config options that will update the basic
config passed to all server instances.
:returns: A dict containing the following entries:
orig_POLICIES: the value of storage_policy.POLICIES prior to
it being patched with fake policies
orig_SysLogHandler: the value of utils.SysLogHandler prior to
it being patched
testdir: root directory used for test files
test_POLICIES: a StoragePolicyCollection of fake policies
test_servers: a tuple of test server instances
test_sockets: a tuple of sockets used by test servers
test_coros: a tuple of greenthreads in which test servers are
running
"""
context = {
"orig_POLICIES": storage_policy._POLICIES,
"orig_SysLogHandler": utils.SysLogHandler}
utils.HASH_PATH_SUFFIX = b'endcap'
utils.SysLogHandler = mock.MagicMock()
# Since we're starting up a lot here, we're going to test more than
# just chunked puts; we're also going to test parts of
# proxy_server.Application we couldn't get to easily otherwise.
context["testdir"] = _testdir = \
os.path.join(mkdtemp(), 'tmp_test_proxy_server_chunked')
mkdirs(_testdir)
rmtree(_testdir)
for drive in ('sda1', 'sdb1', 'sdc1', 'sdd1', 'sde1',
'sdf1', 'sdg1', 'sdh1', 'sdi1', 'sdj1',
'sdk1', 'sdl1'):
mkdirs(os.path.join(_testdir, drive, 'tmp'))
conf = {'devices': _testdir, 'swift_dir': _testdir,
'mount_check': 'false', 'allowed_headers':
'content-encoding, x-object-manifest, content-disposition, foo',
'allow_versions': 't', 'node_timeout': 20}
if extra_conf:
conf.update(extra_conf)
context['conf'] = conf
prolis = listen_zero()
acc1lis = listen_zero()
acc2lis = listen_zero()
con1lis = listen_zero()
con2lis = listen_zero()
obj1lis = listen_zero()
obj2lis = listen_zero()
obj3lis = listen_zero()
obj4lis = listen_zero()
obj5lis = listen_zero()
obj6lis = listen_zero()
objsocks = [obj1lis, obj2lis, obj3lis, obj4lis, obj5lis, obj6lis]
context["test_sockets"] = \
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis, obj3lis,
obj4lis, obj5lis, obj6lis)
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
account_devs = [
{'port': acc1lis.getsockname()[1]},
{'port': acc2lis.getsockname()[1]},
]
write_fake_ring(account_ring_path, *account_devs)
container_ring_path = os.path.join(_testdir, 'container.ring.gz')
container_devs = [
{'port': con1lis.getsockname()[1]},
{'port': con2lis.getsockname()[1]},
]
write_fake_ring(container_ring_path, *container_devs)
storage_policy._POLICIES = storage_policy.StoragePolicyCollection([
StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False),
ECStoragePolicy(3, 'ec', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=2, ec_nparity=1, ec_segment_size=4096),
ECStoragePolicy(4, 'ec-dup', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=2, ec_nparity=1, ec_segment_size=4096,
ec_duplication_factor=2)])
obj_rings = {
0: ('sda1', 'sdb1'),
1: ('sdc1', 'sdd1'),
2: ('sde1', 'sdf1'),
# sdg1, sdh1, sdi1 taken by policy 3 (see below)
}
for policy_index, devices in obj_rings.items():
policy = storage_policy.POLICIES[policy_index]
obj_ring_path = os.path.join(_testdir, policy.ring_name + '.ring.gz')
obj_devs = [
{'port': objsock.getsockname()[1], 'device': dev}
for objsock, dev in zip(objsocks, devices)]
write_fake_ring(obj_ring_path, *obj_devs)
# write_fake_ring can't handle a 3-element ring, and the EC policy needs
# at least 6 devs to work with (ec_k=2, ec_m=1, duplication_factor=2),
# so we do it manually
devs = [{'id': 0, 'zone': 0, 'device': 'sdg1', 'ip': '127.0.0.1',
'port': obj1lis.getsockname()[1]},
{'id': 1, 'zone': 0, 'device': 'sdh1', 'ip': '127.0.0.1',
'port': obj2lis.getsockname()[1]},
{'id': 2, 'zone': 0, 'device': 'sdi1', 'ip': '127.0.0.1',
'port': obj3lis.getsockname()[1]},
{'id': 3, 'zone': 0, 'device': 'sdj1', 'ip': '127.0.0.1',
'port': obj4lis.getsockname()[1]},
{'id': 4, 'zone': 0, 'device': 'sdk1', 'ip': '127.0.0.1',
'port': obj5lis.getsockname()[1]},
{'id': 5, 'zone': 0, 'device': 'sdl1', 'ip': '127.0.0.1',
'port': obj6lis.getsockname()[1]}]
pol3_replica2part2dev_id = [[0, 1, 2, 0],
[1, 2, 0, 1],
[2, 0, 1, 2]]
pol4_replica2part2dev_id = [[0, 1, 2, 3],
[1, 2, 3, 4],
[2, 3, 4, 5],
[3, 4, 5, 0],
[4, 5, 0, 1],
[5, 0, 1, 2]]
obj3_ring_path = os.path.join(
_testdir, storage_policy.POLICIES[3].ring_name + '.ring.gz')
part_shift = 30
with closing(GzipFile(obj3_ring_path, 'wb')) as fh:
pickle.dump(RingData(pol3_replica2part2dev_id, devs, part_shift), fh)
obj4_ring_path = os.path.join(
_testdir, storage_policy.POLICIES[4].ring_name + '.ring.gz')
part_shift = 30
with closing(GzipFile(obj4_ring_path, 'wb')) as fh:
pickle.dump(RingData(pol4_replica2part2dev_id, devs, part_shift), fh)
prosrv = proxy_server.Application(conf, logger=debug_logger('proxy'))
for policy in storage_policy.POLICIES:
# make sure all the rings are loaded
prosrv.get_object_ring(policy.idx)
# don't lose this one!
context["test_POLICIES"] = storage_policy._POLICIES
acc1srv = account_server.AccountController(
conf, logger=debug_logger('acct1'))
acc2srv = account_server.AccountController(
conf, logger=debug_logger('acct2'))
con1srv = container_server.ContainerController(
conf, logger=debug_logger('cont1'))
con2srv = container_server.ContainerController(
conf, logger=debug_logger('cont2'))
obj1srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj1'))
obj2srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj2'))
obj3srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj3'))
obj4srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj4'))
obj5srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj5'))
obj6srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj6'))
context["test_servers"] = \
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv, obj3srv,
obj4srv, obj5srv, obj6srv)
nl = NullLogger()
logging_prosv = proxy_logging.ProxyLoggingMiddleware(
listing_formats.ListingFilter(prosrv, {}, logger=prosrv.logger),
conf, logger=prosrv.logger)
# Yes, eventlet, we know -- we have to support bad clients, though
warnings.filterwarnings(
'ignore', module='eventlet',
message='capitalize_response_headers is disabled')
prospa = spawn(wsgi.server, prolis, logging_prosv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
acc2spa = spawn(wsgi.server, acc2lis, acc2srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
con1spa = spawn(wsgi.server, con1lis, con1srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
con2spa = spawn(wsgi.server, con2lis, con2srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj3spa = spawn(wsgi.server, obj3lis, obj3srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj4spa = spawn(wsgi.server, obj4lis, obj4srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj5spa = spawn(wsgi.server, obj5lis, obj5srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj6spa = spawn(wsgi.server, obj6lis, obj6srv, nl,
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
context["test_coros"] = \
(prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa, obj3spa,
obj4spa, obj5spa, obj6spa)
# Create account
ts = normalize_timestamp(time.time())
partition, nodes = prosrv.account_ring.get_nodes('a')
for node in nodes:
conn = swift.proxy.controllers.obj.http_connect(node['ip'],
node['port'],
node['device'],
partition, 'PUT', '/a',
{'X-Timestamp': ts,
'x-trans-id': 'test'})
resp = conn.getresponse()
assert(resp.status == 201)
# Create another account
# used for account-to-account tests
ts = normalize_timestamp(time.time())
partition, nodes = prosrv.account_ring.get_nodes('a1')
for node in nodes:
conn = swift.proxy.controllers.obj.http_connect(node['ip'],
node['port'],
node['device'],
partition, 'PUT',
'/a1',
{'X-Timestamp': ts,
'x-trans-id': 'test'})
resp = conn.getresponse()
assert(resp.status == 201)
# Create containers, 1 per test policy
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Auth-Token: t\r\n'
b'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
exp, headers[:len(exp)])
# Create container in other account
# used for account-to-account tests
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a1/c1 HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Auth-Token: t\r\n'
b'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
exp, headers[:len(exp)])
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(
b'PUT /v1/a/c1 HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: one\r\n'
b'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
assert headers[:len(exp)] == exp, \
"Expected %r, encountered %r" % (exp, headers[:len(exp)])
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(
b'PUT /v1/a/c2 HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: two\r\n'
b'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
assert headers[:len(exp)] == exp, \
"Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
return context
def teardown_servers(context):
for server in context["test_coros"]:
server.kill()
rmtree(os.path.dirname(context["testdir"]))
utils.SysLogHandler = context["orig_SysLogHandler"]
storage_policy._POLICIES = context["orig_POLICIES"]
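# Hypothetical usage sketch (not part of the original module): a test class
# would typically drive these helpers from setUp/tearDown, e.g.
#
#     def setUp(self):
#         self.context = setup_servers()
#         self.prosrv = self.context['test_servers'][0]
#
#     def tearDown(self):
#         teardown_servers(self.context)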
| swift-master | test/unit/helpers.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.obj.diskfile"""
from __future__ import print_function
import binascii
import six.moves.cPickle as pickle
import os
import errno
import itertools
import mock
import unittest
import email
import tempfile
import threading
import uuid
import xattr
import re
import six
import sys
from collections import defaultdict
from random import shuffle, randint
from shutil import rmtree
from time import time
from tempfile import mkdtemp
from contextlib import closing, contextmanager
from gzip import GzipFile
import pyeclib.ec_iface
from eventlet import hubs, timeout, tpool
from swift.obj.diskfile import MD5_OF_EMPTY_STRING, update_auditor_status
from test import BaseTestCase
from test.debug_logger import debug_logger
from test.unit import (mock as unit_mock, temptree, mock_check_drive,
patch_policies, EMPTY_ETAG, make_timestamp_iter,
DEFAULT_TEST_EC_TYPE, requires_o_tmpfile_support_in_tmp,
encode_frag_archive_bodies, skip_if_no_xattrs)
from swift.obj import diskfile
from swift.common import utils
from swift.common.utils import hash_path, mkdirs, Timestamp, \
encode_timestamps, O_TMPFILE, md5 as _md5
from swift.common import ring
from swift.common.splice import splice
from swift.common.exceptions import DiskFileNotExist, DiskFileQuarantined, \
DiskFileDeviceUnavailable, DiskFileDeleted, DiskFileNotOpen, \
DiskFileError, ReplicationLockTimeout, DiskFileCollision, \
DiskFileExpired, SwiftException, DiskFileNoSpace, \
DiskFileXattrNotSupported, PartitionLockTimeout
from swift.common.storage_policy import (
POLICIES, get_policy_string, StoragePolicy, ECStoragePolicy, REPL_POLICY,
EC_POLICY, PolicyError)
from test.unit.obj.common import write_diskfile
test_policies = [
StoragePolicy(0, name='zero', is_default=True),
ECStoragePolicy(1, name='one', is_default=False,
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4),
]
class md5(object):
def __init__(self, s=b''):
if not isinstance(s, bytes):
s = s.encode('ascii')
self.md = _md5(s, usedforsecurity=False)
def update(self, s=b''):
if not isinstance(s, bytes):
s = s.encode('ascii')
return self.md.update(s)
@property
def hexdigest(self):
return self.md.hexdigest
@property
def digest(self):
return self.md.digest
def find_paths_with_matching_suffixes(needed_matches=2, needed_suffixes=3):
paths = defaultdict(list)
while True:
path = ('a', 'c', uuid.uuid4().hex)
hash_ = hash_path(*path)
suffix = hash_[-3:]
paths[suffix].append(path)
if len(paths) < needed_suffixes:
            # in the extremely unlikely situation where you land the matches
# you need before you get the total suffixes you need - it's
# simpler to just ignore this suffix for now
continue
if len(paths[suffix]) >= needed_matches:
break
return paths, suffix
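# Hypothetical usage sketch (not part of the original module): the returned
# dict maps a 3-character hash suffix to the (account, container, obj) paths
# that hash into it, and `suffix` names the one that collected
# `needed_matches` colliding paths, e.g.
#     paths, suffix = find_paths_with_matching_suffixes()
#     a, c, o = paths[suffix][0]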
def _create_test_ring(path, policy):
ring_name = get_policy_string('object', policy)
testgz = os.path.join(path, ring_name + '.ring.gz')
intended_replica2part2dev_id = [
[0, 1, 2, 3, 4, 5, 6],
[1, 2, 3, 0, 5, 6, 4],
[2, 3, 0, 1, 6, 4, 5]]
intended_devs = [
{'id': 0, 'device': 'sda1', 'zone': 0, 'ip': '127.0.0.0',
'port': 6200},
{'id': 1, 'device': 'sda1', 'zone': 1, 'ip': '127.0.0.1',
'port': 6200},
{'id': 2, 'device': 'sda1', 'zone': 2, 'ip': '127.0.0.2',
'port': 6200},
{'id': 3, 'device': 'sda1', 'zone': 4, 'ip': '127.0.0.3',
'port': 6200},
{'id': 4, 'device': 'sda1', 'zone': 5, 'ip': '127.0.0.4',
'port': 6200},
{'id': 5, 'device': 'sda1', 'zone': 6,
'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6200},
{'id': 6, 'device': 'sda1', 'zone': 7,
'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334',
'port': 6200}]
intended_part_shift = 30
intended_reload_time = 15
with closing(GzipFile(testgz, 'wb')) as f:
pickle.dump(
ring.RingData(intended_replica2part2dev_id, intended_devs,
intended_part_shift),
f)
return ring.Ring(path, ring_name=ring_name,
reload_time=intended_reload_time)
def _make_datafilename(timestamp, policy, frag_index=None, durable=False):
if frag_index is None:
frag_index = randint(0, 9)
filename = timestamp.internal
if policy.policy_type == EC_POLICY:
filename += '#%d' % int(frag_index)
if durable:
filename += '#d'
filename += '.data'
return filename
def _make_metafilename(meta_timestamp, ctype_timestamp=None):
filename = meta_timestamp.internal
if ctype_timestamp is not None:
delta = meta_timestamp.raw - ctype_timestamp.raw
filename = '%s-%x' % (filename, delta)
filename += '.meta'
return filename
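# Illustrative examples (not part of the original module): for an EC policy
# these helpers produce names like '1234567890.12345#2#d.data' (frag index 2,
# durable) while a replication policy yields just '1234567890.12345.data';
# _make_metafilename appends a hex-encoded meta-to-ctype timestamp delta when
# a content-type timestamp is given.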
@patch_policies
class TestDiskFileModuleMethods(unittest.TestCase):
def setUp(self):
skip_if_no_xattrs()
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b''
# Setup a test ring per policy (stolen from common/test_ring.py)
self.testdir = tempfile.mkdtemp()
self.devices = os.path.join(self.testdir, 'node')
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
os.mkdir(self.devices)
self.existing_device = 'sda1'
os.mkdir(os.path.join(self.devices, self.existing_device))
self.objects = os.path.join(self.devices, self.existing_device,
'objects')
os.mkdir(self.objects)
self.parts = {}
for part in ['0', '1', '2', '3']:
self.parts[part] = os.path.join(self.objects, part)
os.mkdir(os.path.join(self.objects, part))
self.ring = _create_test_ring(self.testdir, POLICIES.legacy)
self.conf = dict(
swift_dir=self.testdir, devices=self.devices, mount_check='false',
timeout='300', stats_interval='1')
self.logger = debug_logger()
self.df_mgr = diskfile.DiskFileManager(self.conf, logger=self.logger)
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def _create_diskfile(self, policy):
return self.df_mgr.get_diskfile(self.existing_device,
'0', 'a', 'c', 'o',
policy=policy)
def test_relink_paths(self):
target_dir = os.path.join(self.testdir, 'd1')
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 'f1')
with open(target_path, 'w') as fd:
fd.write('junk')
new_target_path = os.path.join(self.testdir, 'd2', 'f1')
created = diskfile.relink_paths(target_path, new_target_path)
self.assertTrue(created)
self.assertTrue(os.path.isfile(new_target_path))
with open(new_target_path, 'r') as fd:
self.assertEqual('junk', fd.read())
def test_relink_paths_makedirs_error(self):
target_dir = os.path.join(self.testdir, 'd1')
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 'f1')
with open(target_path, 'w') as fd:
fd.write('junk')
new_target_path = os.path.join(self.testdir, 'd2', 'f1')
with mock.patch('swift.obj.diskfile.os.makedirs',
side_effect=Exception('oops')):
with self.assertRaises(Exception) as cm:
diskfile.relink_paths(target_path, new_target_path)
self.assertEqual('oops', str(cm.exception))
with self.assertRaises(Exception) as cm:
diskfile.relink_paths(target_path, new_target_path,
ignore_missing=False)
self.assertEqual('oops', str(cm.exception))
def test_relink_paths_makedirs_race(self):
# test two concurrent relinks of the same object hash dir with race
# around makedirs
target_dir = os.path.join(self.testdir, 'd1')
# target dir exists
os.mkdir(target_dir)
target_path_1 = os.path.join(target_dir, 't1.data')
target_path_2 = os.path.join(target_dir, 't2.data')
# new target dir and files do not exist
new_target_dir = os.path.join(self.testdir, 'd2')
new_target_path_1 = os.path.join(new_target_dir, 't1.data')
new_target_path_2 = os.path.join(new_target_dir, 't2.data')
created = []
def write_and_relink(target_path, new_target_path):
with open(target_path, 'w') as fd:
fd.write(target_path)
created.append(diskfile.relink_paths(target_path, new_target_path))
calls = []
orig_makedirs = os.makedirs
def mock_makedirs(path, *args):
calls.append(path)
if len(calls) == 1:
# pretend another process jumps in here and relinks same dirs
write_and_relink(target_path_2, new_target_path_2)
return orig_makedirs(path, *args)
with mock.patch('swift.obj.diskfile.os.makedirs', mock_makedirs):
write_and_relink(target_path_1, new_target_path_1)
self.assertEqual([new_target_dir, new_target_dir], calls)
self.assertTrue(os.path.isfile(new_target_path_1))
with open(new_target_path_1, 'r') as fd:
self.assertEqual(target_path_1, fd.read())
self.assertTrue(os.path.isfile(new_target_path_2))
with open(new_target_path_2, 'r') as fd:
self.assertEqual(target_path_2, fd.read())
self.assertEqual([True, True], created)
def test_relink_paths_object_dir_exists_but_not_dir(self):
target_dir = os.path.join(self.testdir, 'd1')
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 't1.data')
with open(target_path, 'w') as fd:
fd.write(target_path)
# make a file where the new object dir should be
new_target_dir = os.path.join(self.testdir, 'd2')
with open(new_target_dir, 'w') as fd:
fd.write(new_target_dir)
new_target_path = os.path.join(new_target_dir, 't1.data')
with self.assertRaises(OSError) as cm:
diskfile.relink_paths(target_path, new_target_path)
self.assertEqual(errno.ENOTDIR, cm.exception.errno)
# make a symlink to target where the new object dir should be
os.unlink(new_target_dir)
os.symlink(target_path, new_target_dir)
with self.assertRaises(OSError) as cm:
diskfile.relink_paths(target_path, new_target_path)
self.assertEqual(errno.ENOTDIR, cm.exception.errno)
def test_relink_paths_os_link_error(self):
# check relink_paths raises exception from os.link
target_dir = os.path.join(self.testdir, 'd1')
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 'f1')
with open(target_path, 'w') as fd:
fd.write('junk')
new_target_path = os.path.join(self.testdir, 'd2', 'f1')
with mock.patch('swift.obj.diskfile.os.link',
side_effect=OSError(errno.EPERM, 'nope')):
with self.assertRaises(Exception) as cm:
diskfile.relink_paths(target_path, new_target_path)
self.assertEqual(errno.EPERM, cm.exception.errno)
def test_relink_paths_target_path_does_not_exist(self):
# check relink_paths does not raise exception
target_dir = os.path.join(self.testdir, 'd1')
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 'f1')
new_target_path = os.path.join(self.testdir, 'd2', 'f1')
created = diskfile.relink_paths(target_path, new_target_path)
self.assertFalse(os.path.exists(target_path))
self.assertFalse(os.path.exists(new_target_path))
self.assertFalse(created)
with self.assertRaises(OSError) as cm:
diskfile.relink_paths(target_path, new_target_path,
ignore_missing=False)
self.assertEqual(errno.ENOENT, cm.exception.errno)
self.assertFalse(os.path.exists(target_path))
self.assertFalse(os.path.exists(new_target_path))
def test_relink_paths_os_link_race(self):
# test two concurrent relinks of the same object hash dir with race
# around os.link
target_dir = os.path.join(self.testdir, 'd1')
# target dir exists
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 't1.data')
# new target dir and file do not exist
new_target_dir = os.path.join(self.testdir, 'd2')
new_target_path = os.path.join(new_target_dir, 't1.data')
created = []
def write_and_relink(target_path, new_target_path):
with open(target_path, 'w') as fd:
fd.write(target_path)
created.append(diskfile.relink_paths(target_path, new_target_path))
calls = []
orig_link = os.link
def mock_link(path, new_path):
calls.append((path, new_path))
if len(calls) == 1:
# pretend another process jumps in here and links same files
write_and_relink(target_path, new_target_path)
return orig_link(path, new_path)
with mock.patch('swift.obj.diskfile.os.link', mock_link):
write_and_relink(target_path, new_target_path)
self.assertEqual([(target_path, new_target_path)] * 2, calls)
self.assertTrue(os.path.isfile(new_target_path))
with open(new_target_path, 'r') as fd:
self.assertEqual(target_path, fd.read())
with open(target_path, 'r') as fd:
self.assertEqual(target_path, fd.read())
self.assertEqual([True, False], created)
def test_relink_paths_different_file_exists(self):
# check for an exception if a hard link cannot be made because a
# different file already exists at new_target_path
target_dir = os.path.join(self.testdir, 'd1')
# target dir and file exists
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 't1.data')
with open(target_path, 'w') as fd:
fd.write(target_path)
# new target dir and different file exist
new_target_dir = os.path.join(self.testdir, 'd2')
os.mkdir(new_target_dir)
new_target_path = os.path.join(new_target_dir, 't1.data')
with open(new_target_path, 'w') as fd:
fd.write(new_target_path)
with self.assertRaises(OSError) as cm:
diskfile.relink_paths(target_path, new_target_path)
self.assertEqual(errno.EEXIST, cm.exception.errno)
# check nothing got deleted...
self.assertTrue(os.path.isfile(target_path))
with open(target_path, 'r') as fd:
self.assertEqual(target_path, fd.read())
self.assertTrue(os.path.isfile(new_target_path))
with open(new_target_path, 'r') as fd:
self.assertEqual(new_target_path, fd.read())
def test_relink_paths_same_file_exists(self):
# check for no exception if a hard link cannot be made because a link
# to the same file already exists at the path
target_dir = os.path.join(self.testdir, 'd1')
# target dir and file exists
os.mkdir(target_dir)
target_path = os.path.join(target_dir, 't1.data')
with open(target_path, 'w') as fd:
fd.write(target_path)
# new target dir and link to same file exist
new_target_dir = os.path.join(self.testdir, 'd2')
os.mkdir(new_target_dir)
new_target_path = os.path.join(new_target_dir, 't1.data')
os.link(target_path, new_target_path)
with open(new_target_path, 'r') as fd:
self.assertEqual(target_path, fd.read()) # sanity check
# existing link checks ok
created = diskfile.relink_paths(target_path, new_target_path)
with open(new_target_path, 'r') as fd:
self.assertEqual(target_path, fd.read()) # sanity check
self.assertFalse(created)
# now pretend there is an error when checking that the link already
# exists - expect the EEXIST exception to be raised
orig_stat = os.stat
def mocked_stat(path):
if path == new_target_path:
raise OSError(errno.EPERM, 'cannot be sure link exists :(')
return orig_stat(path)
with mock.patch('swift.obj.diskfile.os.stat', mocked_stat):
with self.assertRaises(OSError) as cm:
diskfile.relink_paths(target_path, new_target_path)
self.assertEqual(errno.EEXIST, cm.exception.errno, str(cm.exception))
with open(new_target_path, 'r') as fd:
self.assertEqual(target_path, fd.read()) # sanity check
        # ...unless while checking for an existing link the target file is
        # found to no longer exist, which is ok
def mocked_stat(path):
if path == target_path:
                raise OSError(errno.ENOENT, 'target no longer here :)')
return orig_stat(path)
with mock.patch('swift.obj.diskfile.os.stat', mocked_stat):
created = diskfile.relink_paths(target_path, new_target_path)
with open(new_target_path, 'r') as fd:
self.assertEqual(target_path, fd.read()) # sanity check
self.assertFalse(created)
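    # extract_policy maps the 'objects' or 'objects-<index>' component of an
    # object path back to a storage policy; unknown indexes and malformed
    # paths yield None.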
def test_extract_policy(self):
# good path names
pn = 'objects/0/606/1984527ed7ef6247c78606/1401379842.14643.data'
self.assertEqual(diskfile.extract_policy(pn), POLICIES[0])
pn = 'objects-1/0/606/198452b6ef6247c78606/1401379842.14643.data'
self.assertEqual(diskfile.extract_policy(pn), POLICIES[1])
# leading slash
pn = '/objects/0/606/1984527ed7ef6247c78606/1401379842.14643.data'
self.assertEqual(diskfile.extract_policy(pn), POLICIES[0])
pn = '/objects-1/0/606/198452b6ef6247c78606/1401379842.14643.data'
self.assertEqual(diskfile.extract_policy(pn), POLICIES[1])
# full paths
good_path = '/srv/node/sda1/objects-1/1/abc/def/1234.data'
self.assertEqual(diskfile.extract_policy(good_path), POLICIES[1])
good_path = '/srv/node/sda1/objects/1/abc/def/1234.data'
self.assertEqual(diskfile.extract_policy(good_path), POLICIES[0])
# short paths
path = '/srv/node/sda1/objects/1/1234.data'
self.assertEqual(diskfile.extract_policy(path), POLICIES[0])
path = '/srv/node/sda1/objects-1/1/1234.data'
self.assertEqual(diskfile.extract_policy(path), POLICIES[1])
        # well formatted, but unknown policy index
pn = 'objects-2/0/606/198427efcff042c78606/1401379842.14643.data'
self.assertIsNone(diskfile.extract_policy(pn))
# malformed path
self.assertIsNone(diskfile.extract_policy(''))
bad_path = '/srv/node/sda1/objects-t/1/abc/def/1234.data'
self.assertIsNone(diskfile.extract_policy(bad_path))
pn = 'XXXX/0/606/1984527ed42b6ef6247c78606/1401379842.14643.data'
self.assertIsNone(diskfile.extract_policy(pn))
bad_path = '/srv/node/sda1/foo-1/1/abc/def/1234.data'
self.assertIsNone(diskfile.extract_policy(bad_path))
bad_path = '/srv/node/sda1/obj1/1/abc/def/1234.data'
self.assertIsNone(diskfile.extract_policy(bad_path))
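    # quarantine_renamer moves an object's hash dir to
    # <devices>/quarantined/<datadir>/<hash>; renaming the same path a
    # second time raises OSError.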
def test_quarantine_renamer(self):
for policy in POLICIES:
            # we use a diskfile here just for convenience; the test is not
            # really about diskfile layout
df = self._create_diskfile(policy=policy)
mkdirs(df._datadir)
exp_dir = os.path.join(self.devices, 'quarantined',
diskfile.get_data_dir(policy),
os.path.basename(df._datadir))
qbit = os.path.join(df._datadir, 'qbit')
with open(qbit, 'w') as f:
f.write('abc')
to_dir = diskfile.quarantine_renamer(self.devices, qbit)
self.assertEqual(to_dir, exp_dir)
self.assertRaises(OSError, diskfile.quarantine_renamer,
self.devices, qbit)
def test_get_data_dir(self):
self.assertEqual(diskfile.get_data_dir(POLICIES[0]),
diskfile.DATADIR_BASE)
self.assertEqual(diskfile.get_data_dir(POLICIES[1]),
diskfile.DATADIR_BASE + "-1")
self.assertRaises(ValueError, diskfile.get_data_dir, 'junk')
self.assertRaises(ValueError, diskfile.get_data_dir, 99)
def test_get_async_dir(self):
self.assertEqual(diskfile.get_async_dir(POLICIES[0]),
diskfile.ASYNCDIR_BASE)
self.assertEqual(diskfile.get_async_dir(POLICIES[1]),
diskfile.ASYNCDIR_BASE + "-1")
self.assertRaises(ValueError, diskfile.get_async_dir, 'junk')
self.assertRaises(ValueError, diskfile.get_async_dir, 99)
def test_get_tmp_dir(self):
self.assertEqual(diskfile.get_tmp_dir(POLICIES[0]),
diskfile.TMP_BASE)
self.assertEqual(diskfile.get_tmp_dir(POLICIES[1]),
diskfile.TMP_BASE + "-1")
self.assertRaises(ValueError, diskfile.get_tmp_dir, 'junk')
self.assertRaises(ValueError, diskfile.get_tmp_dir, 99)
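    # Async update pickles are staged via the policy's tmp dir: plain 'tmp'
    # for policy 0, 'tmp-<index>' for the others.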
def test_pickle_async_update_tmp_dir(self):
for policy in POLICIES:
if int(policy) == 0:
tmp_part = 'tmp'
else:
tmp_part = 'tmp-%d' % policy
tmp_path = os.path.join(
self.devices, self.existing_device, tmp_part)
self.assertFalse(os.path.isdir(tmp_path))
pickle_args = (self.existing_device, 'a', 'c', 'o',
'data', 0.0, policy)
os.makedirs(tmp_path)
            # now create an async update
self.df_mgr.pickle_async_update(*pickle_args)
# check tempdir
self.assertTrue(os.path.isdir(tmp_path))
def test_get_part_path(self):
# partition passed as 'str'
part_dir = diskfile.get_part_path('/srv/node/sda1', POLICIES[0], '123')
exp_dir = '/srv/node/sda1/objects/123'
self.assertEqual(part_dir, exp_dir)
# partition passed as 'int'
part_dir = diskfile.get_part_path('/srv/node/sdb5', POLICIES[1], 123)
exp_dir = '/srv/node/sdb5/objects-1/123'
self.assertEqual(part_dir, exp_dir)
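    # The cases below pair a Python/Swift version with the raw pickled
    # metadata it wrote; read_metadata must cope with both py2-style byte
    # strings and the latin-1 codecs.encode wrappers produced on py3.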
def test_can_read_old_meta(self):
# outputs taken from `xattr -l <diskfile>`
cases = {
'python_2.7.18_swift_2.13_replicated': '''
0000 80 02 7D 71 01 28 55 0E 43 6F 6E 74 65 6E 74 2D ..}q.(U.Content-
0010 4C 65 6E 67 74 68 71 02 55 02 31 33 55 04 6E 61 Lengthq.U.13U.na
0020 6D 65 71 03 55 12 2F 41 55 54 48 5F 74 65 73 74 meq.U./AUTH_test
0030 2F E2 98 83 2F E2 98 83 71 04 55 13 58 2D 4F 62 /.../...q.U.X-Ob
0040 6A 65 63 74 2D 4D 65 74 61 2D 4D 74 69 6D 65 55 ject-Meta-MtimeU
0050 11 31 36 38 32 39 35 39 38 37 34 2E 37 35 36 32 .1682959874.7562
0060 30 35 71 05 55 04 45 54 61 67 71 06 55 20 36 62 05q.U.ETagq.U 6b
0070 37 64 39 61 31 63 35 64 31 36 37 63 63 35 30 30 7d9a1c5d167cc500
0080 33 37 66 32 39 66 32 39 30 62 62 33 37 35 71 07 37f29f290bb375q.
0090 55 0B 58 2D 54 69 6D 65 73 74 61 6D 70 71 08 55 U.X-Timestampq.U
00A0 10 31 36 38 32 39 36 32 36 35 31 2E 39 37 34 39 .1682962651.9749
00B0 34 55 11 58 2D 4F 62 6A 65 63 74 2D 4D 65 74 61 4U.X-Object-Meta
00C0 2D E2 98 83 55 03 E2 98 83 71 09 55 0C 43 6F 6E -...U....q.U.Con
00D0 74 65 6E 74 2D 54 79 70 65 71 0A 55 18 61 70 70 tent-Typeq.U.app
00E0 6C 69 63 61 74 69 6F 6E 2F 6F 63 74 65 74 2D 73 lication/octet-s
00F0 74 72 65 61 6D 71 0B 75 2E treamq.u.
''',
'python_2.7.18_swift_2.13_ec': '''
0000 80 02 7D 71 01 28 55 0E 43 6F 6E 74 65 6E 74 2D ..}q.(U.Content-
0010 4C 65 6E 67 74 68 71 02 55 02 38 34 55 04 6E 61 Lengthq.U.84U.na
0020 6D 65 71 03 55 12 2F 41 55 54 48 5F 74 65 73 74 meq.U./AUTH_test
0030 2F E2 98 83 2F E2 98 83 71 04 58 1E 00 00 00 58 /.../...q.X....X
0040 2D 4F 62 6A 65 63 74 2D 53 79 73 6D 65 74 61 2D -Object-Sysmeta-
0050 45 63 2D 46 72 61 67 2D 49 6E 64 65 78 71 05 55 Ec-Frag-Indexq.U
0060 01 35 55 13 58 2D 4F 62 6A 65 63 74 2D 4D 65 74 .5U.X-Object-Met
0070 61 2D 4D 74 69 6D 65 55 11 31 36 38 32 39 35 39 a-MtimeU.1682959
0080 38 37 34 2E 37 35 36 32 30 35 71 06 58 22 00 00 874.756205q.X"..
0090 00 58 2D 4F 62 6A 65 63 74 2D 53 79 73 6D 65 74 .X-Object-Sysmet
00A0 61 2D 45 63 2D 43 6F 6E 74 65 6E 74 2D 4C 65 6E a-Ec-Content-Len
00B0 67 74 68 71 07 55 02 31 33 71 08 58 18 00 00 00 gthq.U.13q.X....
00C0 58 2D 4F 62 6A 65 63 74 2D 53 79 73 6D 65 74 61 X-Object-Sysmeta
00D0 2D 45 63 2D 45 74 61 67 71 09 55 20 36 62 37 64 -Ec-Etagq.U 6b7d
00E0 39 61 31 63 35 64 31 36 37 63 63 35 30 30 33 37 9a1c5d167cc50037
00F0 66 32 39 66 32 39 30 62 62 33 37 35 71 0A 55 04 f29f290bb375q.U.
0100 45 54 61 67 71 0B 55 20 65 32 66 64 34 33 30 65 ETagq.U e2fd430e
0110 61 66 37 32 32 33 63 32 35 30 33 63 34 65 38 33 af7223c2503c4e83
0120 30 31 63 66 66 33 37 63 71 0C 55 0B 58 2D 54 69 01cff37cq.U.X-Ti
0130 6D 65 73 74 61 6D 70 71 0D 55 10 31 36 38 32 39 mestampq.U.16829
0140 36 32 32 36 32 2E 31 36 31 39 39 55 11 58 2D 4F 62262.16199U.X-O
0150 62 6A 65 63 74 2D 4D 65 74 61 2D E2 98 83 55 03 bject-Meta-...U.
0160 E2 98 83 71 0E 58 1A 00 00 00 58 2D 4F 62 6A 65 ...q.X....X-Obje
0170 63 74 2D 53 79 73 6D 65 74 61 2D 45 63 2D 53 63 ct-Sysmeta-Ec-Sc
0180 68 65 6D 65 71 0F 55 1A 6C 69 62 65 72 61 73 75 hemeq.U.liberasu
0190 72 65 63 6F 64 65 5F 72 73 5F 76 61 6E 64 20 34 recode_rs_vand 4
01A0 2B 32 71 10 55 0C 43 6F 6E 74 65 6E 74 2D 54 79 +2q.U.Content-Ty
01B0 70 65 71 11 55 18 61 70 70 6C 69 63 61 74 69 6F peq.U.applicatio
01C0 6E 2F 6F 63 74 65 74 2D 73 74 72 65 61 6D 71 12 n/octet-streamq.
01D0 58 20 00 00 00 58 2D 4F 62 6A 65 63 74 2D 53 79 X ...X-Object-Sy
01E0 73 6D 65 74 61 2D 45 63 2D 53 65 67 6D 65 6E 74 smeta-Ec-Segment
01F0 2D 53 69 7A 65 71 13 55 07 31 30 34 38 35 37 36 -Sizeq.U.1048576
0200 71 14 75 2E q.u.
''',
'python_2.7.18_swift_2.23_replicated': '''
0000 80 02 7D 71 01 28 55 0E 43 6F 6E 74 65 6E 74 2D ..}q.(U.Content-
0010 4C 65 6E 67 74 68 71 02 55 02 31 33 71 03 55 04 Lengthq.U.13q.U.
0020 6E 61 6D 65 71 04 55 12 2F 41 55 54 48 5F 74 65 nameq.U./AUTH_te
0030 73 74 2F E2 98 83 2F E2 98 83 71 05 55 0C 43 6F st/.../...q.U.Co
0040 6E 74 65 6E 74 2D 54 79 70 65 71 06 55 18 61 70 ntent-Typeq.U.ap
0050 70 6C 69 63 61 74 69 6F 6E 2F 6F 63 74 65 74 2D plication/octet-
0060 73 74 72 65 61 6D 71 07 55 04 45 54 61 67 71 08 streamq.U.ETagq.
0070 55 20 36 62 37 64 39 61 31 63 35 64 31 36 37 63 U 6b7d9a1c5d167c
0080 63 35 30 30 33 37 66 32 39 66 32 39 30 62 62 33 c50037f29f290bb3
0090 37 35 71 09 55 0B 58 2D 54 69 6D 65 73 74 61 6D 75q.U.X-Timestam
00A0 70 71 0A 55 10 31 36 38 32 39 36 33 32 30 39 2E pq.U.1682963209.
00B0 38 32 32 37 32 71 0B 55 11 58 2D 4F 62 6A 65 63 82272q.U.X-Objec
00C0 74 2D 4D 65 74 61 2D E2 98 83 71 0C 55 03 E2 98 t-Meta-...q.U...
00D0 83 71 0D 55 13 58 2D 4F 62 6A 65 63 74 2D 4D 65 .q.U.X-Object-Me
00E0 74 61 2D 4D 74 69 6D 65 71 0E 55 11 31 36 38 32 ta-Mtimeq.U.1682
00F0 39 35 39 38 37 34 2E 37 35 36 32 30 35 71 0F 75 959874.756205q.u
0100 2E .
''',
'python_3.10.6_swift_2.23_replicated': '''
0000 80 02 7D 71 00 28 63 5F 63 6F 64 65 63 73 0A 65 ..}q.(c_codecs.e
0010 6E 63 6F 64 65 0A 71 01 58 0B 00 00 00 58 2D 54 ncode.q.X....X-T
0020 69 6D 65 73 74 61 6D 70 71 02 58 06 00 00 00 6C imestampq.X....l
0030 61 74 69 6E 31 71 03 86 71 04 52 71 05 68 01 58 atin1q..q.Rq.h.X
0040 10 00 00 00 31 36 38 32 39 36 33 30 31 37 2E 31 ....1682963017.1
0050 30 34 37 32 71 06 68 03 86 71 07 52 71 08 68 01 0472q.h..q.Rq.h.
0060 58 0C 00 00 00 43 6F 6E 74 65 6E 74 2D 54 79 70 X....Content-Typ
0070 65 71 09 68 03 86 71 0A 52 71 0B 68 01 58 18 00 eq.h..q.Rq.h.X..
0080 00 00 61 70 70 6C 69 63 61 74 69 6F 6E 2F 6F 63 ..application/oc
0090 74 65 74 2D 73 74 72 65 61 6D 71 0C 68 03 86 71 tet-streamq.h..q
00A0 0D 52 71 0E 68 01 58 0E 00 00 00 43 6F 6E 74 65 .Rq.h.X....Conte
00B0 6E 74 2D 4C 65 6E 67 74 68 71 0F 68 03 86 71 10 nt-Lengthq.h..q.
00C0 52 71 11 68 01 58 02 00 00 00 31 33 71 12 68 03 Rq.h.X....13q.h.
00D0 86 71 13 52 71 14 68 01 58 04 00 00 00 45 54 61 .q.Rq.h.X....ETa
00E0 67 71 15 68 03 86 71 16 52 71 17 68 01 58 20 00 gq.h..q.Rq.h.X .
00F0 00 00 36 62 37 64 39 61 31 63 35 64 31 36 37 63 ..6b7d9a1c5d167c
0100 63 35 30 30 33 37 66 32 39 66 32 39 30 62 62 33 c50037f29f290bb3
0110 37 35 71 18 68 03 86 71 19 52 71 1A 68 01 58 13 75q.h..q.Rq.h.X.
0120 00 00 00 58 2D 4F 62 6A 65 63 74 2D 4D 65 74 61 ...X-Object-Meta
0130 2D 4D 74 69 6D 65 71 1B 68 03 86 71 1C 52 71 1D -Mtimeq.h..q.Rq.
0140 68 01 58 11 00 00 00 31 36 38 32 39 35 39 38 37 h.X....168295987
0150 34 2E 37 35 36 32 30 35 71 1E 68 03 86 71 1F 52 4.756205q.h..q.R
0160 71 20 68 01 58 1A 00 00 00 58 2D 4F 62 6A 65 63 q h.X....X-Objec
0170 74 2D 4D 65 74 61 2D C3 83 C2 A2 C3 82 C2 98 C3 t-Meta-.........
0180 82 C2 83 71 21 68 03 86 71 22 52 71 23 68 01 58 ...q!h..q"Rq#h.X
0190 0C 00 00 00 C3 83 C2 A2 C3 82 C2 98 C3 82 C2 83 ................
01A0 71 24 68 03 86 71 25 52 71 26 68 01 58 04 00 00 q$h..q%Rq&h.X...
01B0 00 6E 61 6D 65 71 27 68 03 86 71 28 52 71 29 68 .nameq'h..q(Rq)h
01C0 01 58 18 00 00 00 2F 41 55 54 48 5F 74 65 73 74 .X..../AUTH_test
01D0 2F C3 A2 C2 98 C2 83 2F C3 A2 C2 98 C2 83 71 2A /....../......q*
01E0 68 03 86 71 2B 52 71 2C 75 2E h..q+Rq,u.
''',
'python_2.7.18_swift_2.23_ec': '''
0000 80 02 7D 71 01 28 55 0E 43 6F 6E 74 65 6E 74 2D ..}q.(U.Content-
0010 4C 65 6E 67 74 68 71 02 55 02 38 34 71 03 55 04 Lengthq.U.84q.U.
0020 6E 61 6D 65 71 04 55 12 2F 41 55 54 48 5F 74 65 nameq.U./AUTH_te
0030 73 74 2F E2 98 83 2F E2 98 83 71 05 55 1E 58 2D st/.../...q.U.X-
0040 4F 62 6A 65 63 74 2D 53 79 73 6D 65 74 61 2D 45 Object-Sysmeta-E
0050 63 2D 46 72 61 67 2D 49 6E 64 65 78 55 01 35 55 c-Frag-IndexU.5U
0060 0C 43 6F 6E 74 65 6E 74 2D 54 79 70 65 71 06 55 .Content-Typeq.U
0070 18 61 70 70 6C 69 63 61 74 69 6F 6E 2F 6F 63 74 .application/oct
0080 65 74 2D 73 74 72 65 61 6D 71 07 55 22 58 2D 4F et-streamq.U"X-O
0090 62 6A 65 63 74 2D 53 79 73 6D 65 74 61 2D 45 63 bject-Sysmeta-Ec
00A0 2D 43 6F 6E 74 65 6E 74 2D 4C 65 6E 67 74 68 55 -Content-LengthU
00B0 02 31 33 71 08 55 18 58 2D 4F 62 6A 65 63 74 2D .13q.U.X-Object-
00C0 53 79 73 6D 65 74 61 2D 45 63 2D 45 74 61 67 55 Sysmeta-Ec-EtagU
00D0 20 36 62 37 64 39 61 31 63 35 64 31 36 37 63 63 6b7d9a1c5d167cc
00E0 35 30 30 33 37 66 32 39 66 32 39 30 62 62 33 37 50037f29f290bb37
00F0 35 71 09 55 04 45 54 61 67 71 0A 55 20 65 32 66 5q.U.ETagq.U e2f
0100 64 34 33 30 65 61 66 37 32 32 33 63 32 35 30 33 d430eaf7223c2503
0110 63 34 65 38 33 30 31 63 66 66 33 37 63 71 0B 55 c4e8301cff37cq.U
0120 0B 58 2D 54 69 6D 65 73 74 61 6D 70 71 0C 55 10 .X-Timestampq.U.
0130 31 36 38 32 39 36 33 31 33 30 2E 33 35 39 38 36 1682963130.35986
0140 71 0D 55 11 58 2D 4F 62 6A 65 63 74 2D 4D 65 74 q.U.X-Object-Met
0150 61 2D E2 98 83 71 0E 55 03 E2 98 83 71 0F 55 1A a-...q.U....q.U.
0160 58 2D 4F 62 6A 65 63 74 2D 53 79 73 6D 65 74 61 X-Object-Sysmeta
0170 2D 45 63 2D 53 63 68 65 6D 65 55 1A 6C 69 62 65 -Ec-SchemeU.libe
0180 72 61 73 75 72 65 63 6F 64 65 5F 72 73 5F 76 61 rasurecode_rs_va
0190 6E 64 20 34 2B 32 71 10 55 13 58 2D 4F 62 6A 65 nd 4+2q.U.X-Obje
01A0 63 74 2D 4D 65 74 61 2D 4D 74 69 6D 65 71 11 55 ct-Meta-Mtimeq.U
01B0 11 31 36 38 32 39 35 39 38 37 34 2E 37 35 36 32 .1682959874.7562
01C0 30 35 71 12 55 20 58 2D 4F 62 6A 65 63 74 2D 53 05q.U X-Object-S
01D0 79 73 6D 65 74 61 2D 45 63 2D 53 65 67 6D 65 6E ysmeta-Ec-Segmen
01E0 74 2D 53 69 7A 65 55 07 31 30 34 38 35 37 36 71 t-SizeU.1048576q
01F0 13 75 2E .u.
''',
'python_3.10.6_swift_2.23_ec': '''
0000 80 02 7D 71 00 28 63 5F 63 6F 64 65 63 73 0A 65 ..}q.(c_codecs.e
0010 6E 63 6F 64 65 0A 71 01 58 0B 00 00 00 58 2D 54 ncode.q.X....X-T
0020 69 6D 65 73 74 61 6D 70 71 02 58 06 00 00 00 6C imestampq.X....l
0030 61 74 69 6E 31 71 03 86 71 04 52 71 05 68 01 58 atin1q..q.Rq.h.X
0040 10 00 00 00 31 36 38 32 39 36 32 39 35 35 2E 33 ....1682962955.3
0050 37 35 34 36 71 06 68 03 86 71 07 52 71 08 68 01 7546q.h..q.Rq.h.
0060 58 0C 00 00 00 43 6F 6E 74 65 6E 74 2D 54 79 70 X....Content-Typ
0070 65 71 09 68 03 86 71 0A 52 71 0B 68 01 58 18 00 eq.h..q.Rq.h.X..
0080 00 00 61 70 70 6C 69 63 61 74 69 6F 6E 2F 6F 63 ..application/oc
0090 74 65 74 2D 73 74 72 65 61 6D 71 0C 68 03 86 71 tet-streamq.h..q
00A0 0D 52 71 0E 68 01 58 0E 00 00 00 43 6F 6E 74 65 .Rq.h.X....Conte
00B0 6E 74 2D 4C 65 6E 67 74 68 71 0F 68 03 86 71 10 nt-Lengthq.h..q.
00C0 52 71 11 68 01 58 02 00 00 00 38 34 71 12 68 03 Rq.h.X....84q.h.
00D0 86 71 13 52 71 14 68 01 58 04 00 00 00 45 54 61 .q.Rq.h.X....ETa
00E0 67 71 15 68 03 86 71 16 52 71 17 68 01 58 20 00 gq.h..q.Rq.h.X .
00F0 00 00 65 32 66 64 34 33 30 65 61 66 37 32 32 33 ..e2fd430eaf7223
0100 63 32 35 30 33 63 34 65 38 33 30 31 63 66 66 33 c2503c4e8301cff3
0110 37 63 71 18 68 03 86 71 19 52 71 1A 68 01 58 13 7cq.h..q.Rq.h.X.
0120 00 00 00 58 2D 4F 62 6A 65 63 74 2D 4D 65 74 61 ...X-Object-Meta
0130 2D 4D 74 69 6D 65 71 1B 68 03 86 71 1C 52 71 1D -Mtimeq.h..q.Rq.
0140 68 01 58 11 00 00 00 31 36 38 32 39 35 39 38 37 h.X....168295987
0150 34 2E 37 35 36 32 30 35 71 1E 68 03 86 71 1F 52 4.756205q.h..q.R
0160 71 20 68 01 58 1A 00 00 00 58 2D 4F 62 6A 65 63 q h.X....X-Objec
0170 74 2D 4D 65 74 61 2D C3 83 C2 A2 C3 82 C2 98 C3 t-Meta-.........
0180 82 C2 83 71 21 68 03 86 71 22 52 71 23 68 01 58 ...q!h..q"Rq#h.X
0190 0C 00 00 00 C3 83 C2 A2 C3 82 C2 98 C3 82 C2 83 ................
01A0 71 24 68 03 86 71 25 52 71 26 68 01 58 18 00 00 q$h..q%Rq&h.X...
01B0 00 58 2D 4F 62 6A 65 63 74 2D 53 79 73 6D 65 74 .X-Object-Sysmet
01C0 61 2D 45 63 2D 45 74 61 67 71 27 68 03 86 71 28 a-Ec-Etagq'h..q(
01D0 52 71 29 68 01 58 20 00 00 00 36 62 37 64 39 61 Rq)h.X ...6b7d9a
01E0 31 63 35 64 31 36 37 63 63 35 30 30 33 37 66 32 1c5d167cc50037f2
01F0 39 66 32 39 30 62 62 33 37 35 71 2A 68 03 86 71 9f290bb375q*h..q
0200 2B 52 71 2C 68 01 58 22 00 00 00 58 2D 4F 62 6A +Rq,h.X"...X-Obj
0210 65 63 74 2D 53 79 73 6D 65 74 61 2D 45 63 2D 43 ect-Sysmeta-Ec-C
0220 6F 6E 74 65 6E 74 2D 4C 65 6E 67 74 68 71 2D 68 ontent-Lengthq-h
0230 03 86 71 2E 52 71 2F 68 01 58 02 00 00 00 31 33 ..q.Rq/h.X....13
0240 71 30 68 03 86 71 31 52 71 32 68 01 58 1E 00 00 q0h..q1Rq2h.X...
0250 00 58 2D 4F 62 6A 65 63 74 2D 53 79 73 6D 65 74 .X-Object-Sysmet
0260 61 2D 45 63 2D 46 72 61 67 2D 49 6E 64 65 78 71 a-Ec-Frag-Indexq
0270 33 68 03 86 71 34 52 71 35 68 01 58 01 00 00 00 3h..q4Rq5h.X....
0280 35 71 36 68 03 86 71 37 52 71 38 68 01 58 1A 00 5q6h..q7Rq8h.X..
0290 00 00 58 2D 4F 62 6A 65 63 74 2D 53 79 73 6D 65 ..X-Object-Sysme
02A0 74 61 2D 45 63 2D 53 63 68 65 6D 65 71 39 68 03 ta-Ec-Schemeq9h.
02B0 86 71 3A 52 71 3B 68 01 58 1A 00 00 00 6C 69 62 .q:Rq;h.X....lib
02C0 65 72 61 73 75 72 65 63 6F 64 65 5F 72 73 5F 76 erasurecode_rs_v
02D0 61 6E 64 20 34 2B 32 71 3C 68 03 86 71 3D 52 71 and 4+2q<h..q=Rq
02E0 3E 68 01 58 20 00 00 00 58 2D 4F 62 6A 65 63 74 >h.X ...X-Object
02F0 2D 53 79 73 6D 65 74 61 2D 45 63 2D 53 65 67 6D -Sysmeta-Ec-Segm
0300 65 6E 74 2D 53 69 7A 65 71 3F 68 03 86 71 40 52 ent-Sizeq?h..q@R
0310 71 41 68 01 58 07 00 00 00 31 30 34 38 35 37 36 qAh.X....1048576
0320 71 42 68 03 86 71 43 52 71 44 68 01 58 04 00 00 qBh..qCRqDh.X...
0330 00 6E 61 6D 65 71 45 68 03 86 71 46 52 71 47 68 .nameqEh..qFRqGh
0340 01 58 18 00 00 00 2F 41 55 54 48 5F 74 65 73 74 .X..../AUTH_test
0350 2F C3 A2 C2 98 C2 83 2F C3 A2 C2 98 C2 83 71 48 /....../......qH
0360 68 03 86 71 49 52 71 4A 75 2E h..qIRqJu.
''',
'python3.8.10_swift_2.31.1_replicated': '''
0000 80 02 7D 71 00 28 63 5F 63 6F 64 65 63 73 0A 65 ..}q.(c_codecs.e
0010 6E 63 6F 64 65 0A 71 01 58 0B 00 00 00 58 2D 54 ncode.q.X....X-T
0020 69 6D 65 73 74 61 6D 70 71 02 58 06 00 00 00 6C imestampq.X....l
0030 61 74 69 6E 31 71 03 86 71 04 52 71 05 68 01 58 atin1q..q.Rq.h.X
0040 10 00 00 00 31 36 38 33 30 36 35 34 37 38 2E 32 ....1683065478.2
0050 35 30 30 34 71 06 68 03 86 71 07 52 71 08 68 01 5004q.h..q.Rq.h.
0060 58 0C 00 00 00 43 6F 6E 74 65 6E 74 2D 54 79 70 X....Content-Typ
0070 65 71 09 68 03 86 71 0A 52 71 0B 68 01 58 18 00 eq.h..q.Rq.h.X..
0080 00 00 61 70 70 6C 69 63 61 74 69 6F 6E 2F 6F 63 ..application/oc
0090 74 65 74 2D 73 74 72 65 61 6D 71 0C 68 03 86 71 tet-streamq.h..q
00A0 0D 52 71 0E 68 01 58 0E 00 00 00 43 6F 6E 74 65 .Rq.h.X....Conte
00B0 6E 74 2D 4C 65 6E 67 74 68 71 0F 68 03 86 71 10 nt-Lengthq.h..q.
00C0 52 71 11 68 01 58 01 00 00 00 38 71 12 68 03 86 Rq.h.X....8q.h..
00D0 71 13 52 71 14 68 01 58 04 00 00 00 45 54 61 67 q.Rq.h.X....ETag
00E0 71 15 68 03 86 71 16 52 71 17 68 01 58 20 00 00 q.h..q.Rq.h.X ..
00F0 00 37 30 63 31 64 62 35 36 66 33 30 31 63 39 65 .70c1db56f301c9e
0100 33 33 37 62 30 30 39 39 62 64 34 31 37 34 62 32 337b0099bd4174b2
0110 38 71 18 68 03 86 71 19 52 71 1A 68 01 58 13 00 8q.h..q.Rq.h.X..
0120 00 00 58 2D 4F 62 6A 65 63 74 2D 4D 65 74 61 2D ..X-Object-Meta-
0130 4D 74 69 6D 65 71 1B 68 03 86 71 1C 52 71 1D 68 Mtimeq.h..q.Rq.h
0140 01 58 11 00 00 00 31 36 38 33 30 36 34 39 33 38 .X....1683064938
0150 2E 36 39 39 30 32 37 71 1E 68 03 86 71 1F 52 71 .699027q.h..q.Rq
0160 20 68 01 58 1A 00 00 00 58 2D 4F 62 6A 65 63 74 h.X....X-Object
0170 2D 4D 65 74 61 2D C3 83 C2 A2 C3 82 C2 98 C3 82 -Meta-..........
0180 C2 83 71 21 68 03 86 71 22 52 71 23 68 01 58 0C ..q!h..q"Rq#h.X.
0190 00 00 00 C3 83 C2 A2 C3 82 C2 98 C3 82 C2 83 71 ...............q
01A0 24 68 03 86 71 25 52 71 26 68 01 58 04 00 00 00 $h..q%Rq&h.X....
01B0 6E 61 6D 65 71 27 68 03 86 71 28 52 71 29 68 01 nameq'h..q(Rq)h.
01C0 58 18 00 00 00 2F 41 55 54 48 5F 74 65 73 74 2F X..../AUTH_test/
01D0 C3 A2 C2 98 C2 83 2F C3 A2 C2 98 C2 83 71 2A 68 ....../......q*h
01E0 03 86 71 2B 52 71 2C 75 2E ..q+Rq,u.
''',
}
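        # Reassemble the raw xattr value from an `xattr -l` dump: the hex
        # bytes live in columns 7-55 of each line, so join those slices,
        # strip the spaces and unhexlify.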
def raw_xattr(output):
return binascii.unhexlify(''.join(
line[7:55] for line in output.split('\n')
).replace(' ', ''))
path = os.path.join(self.testdir, str(uuid.uuid4()))
for case, xattr_output in cases.items():
try:
to_write = raw_xattr(xattr_output)
with open(path, 'wb') as fp:
xattr.setxattr(
fp.fileno(), 'user.swift.metadata', to_write)
with open(path, 'rb') as fd:
actual = diskfile.read_metadata(fd)
                # name should come out as a native string
expected_name = b'/AUTH_test/\xe2\x98\x83/\xe2\x98\x83'
if not six.PY2:
expected_name = expected_name.decode('utf8')
self.assertEqual(actual['name'], expected_name)
# other meta will be WSGI strings, though
self.assertEqual(
actual['X-Object-Meta-\xe2\x98\x83'], '\xe2\x98\x83')
except Exception:
print('Failure in %s' % case, file=sys.stderr)
raise
def test_write_read_metadata(self):
path = os.path.join(self.testdir, str(uuid.uuid4()))
metadata = {'name': '/a/c/o',
'Content-Length': 99,
u'X-Object-Sysmeta-Ec-Frag-Index': 4,
u'X-Object-Meta-Strange': u'should be bytes',
b'X-Object-Meta-x\xff': b'not utf8 \xff',
u'X-Object-Meta-y\xe8': u'not ascii \xe8'}
as_bytes = {b'name': b'/a/c/o',
b'Content-Length': 99,
b'X-Object-Sysmeta-Ec-Frag-Index': 4,
b'X-Object-Meta-Strange': b'should be bytes',
b'X-Object-Meta-x\xff': b'not utf8 \xff',
b'X-Object-Meta-y\xc3\xa8': b'not ascii \xc3\xa8'}
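        # On py3 the metadata is expected back as the bytes decoded with
        # 'surrogateescape', so non-UTF-8 bytes round-trip as surrogates;
        # on py2 the raw bytes dict is expected unchanged.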
if six.PY2:
as_native = as_bytes
else:
as_native = dict((k.decode('utf-8', 'surrogateescape'),
v if isinstance(v, int) else
v.decode('utf-8', 'surrogateescape'))
for k, v in as_bytes.items())
def check_metadata(expected, typ):
with open(path, 'rb') as fd:
actual = diskfile.read_metadata(fd)
self.assertEqual(expected, actual)
for k, v in actual.items():
self.assertIsInstance(k, typ)
self.assertIsInstance(v, (typ, int))
# Check can write raw bytes
with open(path, 'wb') as fd:
diskfile.write_metadata(fd, as_bytes)
check_metadata(as_native, str)
# Check can write native (with surrogates on py3)
with open(path, 'wb') as fd:
diskfile.write_metadata(fd, as_native)
check_metadata(as_native, str)
# Check can write some crazy mix
with open(path, 'wb') as fd:
diskfile.write_metadata(fd, metadata)
check_metadata(as_native, str)
@patch_policies
class TestObjectAuditLocationGenerator(unittest.TestCase):
def _make_file(self, path):
try:
os.makedirs(os.path.dirname(path))
except OSError as err:
if err.errno != errno.EEXIST:
raise
with open(path, 'w'):
pass
def test_audit_location_class(self):
al = diskfile.AuditLocation('abc', '123', '_-_',
policy=POLICIES.legacy)
self.assertEqual(str(al), 'abc')
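    # object_audit_location_generator walks
    # <device>/<datadir>/<partition>/<suffix>/<hash>; the tree built below
    # mixes valid hash dirs, stray files, empty dirs and non-object data
    # dirs to check exactly what gets yielded.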
def test_finding_of_hashdirs(self):
with temptree([]) as tmpdir:
# the good
os.makedirs(os.path.join(tmpdir, "sdp", "objects", "1519", "aca",
"5c1fdc1ffb12e5eaf84edc30d8b67aca"))
os.makedirs(os.path.join(tmpdir, "sdp", "objects", "1519", "aca",
"fdfd184d39080020bc8b487f8a7beaca"))
os.makedirs(os.path.join(tmpdir, "sdp", "objects", "1519", "df2",
"b0fe7af831cc7b1af5bf486b1c841df2"))
os.makedirs(os.path.join(tmpdir, "sdp", "objects", "9720", "ca5",
"4a943bc72c2e647c4675923d58cf4ca5"))
os.makedirs(os.path.join(tmpdir, "sdq", "objects", "3071", "8eb",
"fcd938702024c25fef6c32fef05298eb"))
os.makedirs(os.path.join(tmpdir, "sdp", "objects-1", "9970", "ca5",
"4a943bc72c2e647c4675923d58cf4ca5"))
self._make_file(os.path.join(tmpdir, "sdp", "objects", "1519",
"fed"))
self._make_file(os.path.join(tmpdir, "sdq", "objects", "9876"))
# the empty
os.makedirs(os.path.join(tmpdir, "sdr"))
os.makedirs(os.path.join(tmpdir, "sds", "objects"))
os.makedirs(os.path.join(tmpdir, "sdt", "objects", "9601"))
os.makedirs(os.path.join(tmpdir, "sdu", "objects", "6499", "f80"))
# the irrelevant
os.makedirs(os.path.join(tmpdir, "sdv", "accounts", "77", "421",
"4b8c86149a6d532f4af018578fd9f421"))
os.makedirs(os.path.join(tmpdir, "sdw", "containers", "28", "51e",
"4f9eee668b66c6f0250bfa3c7ab9e51e"))
logger = debug_logger()
loc_generators = []
datadirs = ["objects", "objects-1"]
for datadir in datadirs:
loc_generators.append(
diskfile.object_audit_location_generator(
devices=tmpdir, datadir=datadir, mount_check=False,
logger=logger))
all_locs = itertools.chain(*loc_generators)
locations = [(loc.path, loc.device, loc.partition, loc.policy) for
loc in all_locs]
locations.sort()
expected = \
[(os.path.join(tmpdir, "sdp", "objects-1", "9970", "ca5",
"4a943bc72c2e647c4675923d58cf4ca5"),
"sdp", "9970", POLICIES[1]),
(os.path.join(tmpdir, "sdp", "objects", "1519", "aca",
"5c1fdc1ffb12e5eaf84edc30d8b67aca"),
"sdp", "1519", POLICIES[0]),
(os.path.join(tmpdir, "sdp", "objects", "1519", "aca",
"fdfd184d39080020bc8b487f8a7beaca"),
"sdp", "1519", POLICIES[0]),
(os.path.join(tmpdir, "sdp", "objects", "1519", "df2",
"b0fe7af831cc7b1af5bf486b1c841df2"),
"sdp", "1519", POLICIES[0]),
(os.path.join(tmpdir, "sdp", "objects", "9720", "ca5",
"4a943bc72c2e647c4675923d58cf4ca5"),
"sdp", "9720", POLICIES[0]),
(os.path.join(tmpdir, "sdq", "objects", "3071", "8eb",
"fcd938702024c25fef6c32fef05298eb"),
"sdq", "3071", POLICIES[0]),
]
self.assertEqual(locations, expected)
# Reset status file for next run
for datadir in datadirs:
diskfile.clear_auditor_status(tmpdir, datadir)
# now without a logger
for datadir in datadirs:
loc_generators.append(
diskfile.object_audit_location_generator(
devices=tmpdir, datadir=datadir, mount_check=False,
logger=logger))
all_locs = itertools.chain(*loc_generators)
locations = [(loc.path, loc.device, loc.partition, loc.policy) for
loc in all_locs]
locations.sort()
self.assertEqual(locations, expected)
def test_skipping_unmounted_devices(self):
with temptree([]) as tmpdir, mock_check_drive() as mocks:
mocks['ismount'].side_effect = lambda path: path.endswith('sdp')
os.makedirs(os.path.join(tmpdir, "sdp", "objects",
"2607", "df3",
"ec2871fe724411f91787462f97d30df3"))
os.makedirs(os.path.join(tmpdir, "sdq", "objects",
"9785", "a10",
"4993d582f41be9771505a8d4cb237a10"))
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, datadir="objects", mount_check=True)]
locations.sort()
self.assertEqual(
locations,
[(os.path.join(tmpdir, "sdp", "objects",
"2607", "df3",
"ec2871fe724411f91787462f97d30df3"),
"sdp", "2607", POLICIES[0])])
# Do it again, this time with a logger.
logger = debug_logger()
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, datadir="objects", mount_check=True,
logger=logger)]
debug_lines = logger.get_lines_for_level('debug')
self.assertEqual([
'Skipping: %s/sdq is not mounted' % tmpdir,
], debug_lines)
def test_skipping_files(self):
with temptree([]) as tmpdir:
os.makedirs(os.path.join(tmpdir, "sdp", "objects",
"2607", "df3",
"ec2871fe724411f91787462f97d30df3"))
with open(os.path.join(tmpdir, "garbage"), "wb"):
pass
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, datadir="objects", mount_check=False)]
self.assertEqual(
locations,
[(os.path.join(tmpdir, "sdp", "objects",
"2607", "df3",
"ec2871fe724411f91787462f97d30df3"),
"sdp", "2607", POLICIES[0])])
# Do it again, this time with a logger.
logger = debug_logger('test')
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, datadir="objects", mount_check=False,
logger=logger)]
debug_lines = logger.get_lines_for_level('debug')
self.assertEqual([
'Skipping: %s/garbage is not a directory' % tmpdir,
], debug_lines)
logger.clear()
with mock_check_drive() as mocks:
mocks['ismount'].side_effect = lambda path: (
False if path.endswith('garbage') else True)
locations = [
(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=tmpdir, datadir="objects", mount_check=True,
logger=logger)]
debug_lines = logger.get_lines_for_level('debug')
self.assertEqual([
'Skipping: %s/garbage is not mounted' % tmpdir,
], debug_lines)
def test_only_catch_expected_errors(self):
# Crazy exceptions should still escape object_audit_location_generator
# so that errors get logged and a human can see what's going wrong;
# only normal FS corruption should be skipped over silently.
def list_locations(dirname, datadir):
return [(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=dirname, datadir=datadir, mount_check=False)]
real_listdir = os.listdir
def splode_if_endswith(suffix):
def sploder(path):
if path.endswith(suffix):
raise OSError(errno.EACCES, "don't try to ad-lib")
else:
return real_listdir(path)
return sploder
with temptree([]) as tmpdir:
os.makedirs(os.path.join(tmpdir, "sdf", "objects",
"2607", "b54",
"fe450ec990a88cc4b252b181bab04b54"))
with mock.patch('os.listdir', splode_if_endswith("sdf/objects")):
self.assertRaises(OSError, list_locations, tmpdir, "objects")
with mock.patch('os.listdir', splode_if_endswith("2607")):
self.assertRaises(OSError, list_locations, tmpdir, "objects")
with mock.patch('os.listdir', splode_if_endswith("b54")):
self.assertRaises(OSError, list_locations, tmpdir, "objects")
def test_auditor_status(self):
with temptree([]) as tmpdir:
os.makedirs(os.path.join(tmpdir, "sdf", "objects", "1", "a", "b"))
os.makedirs(os.path.join(tmpdir, "sdf", "objects", "2", "a", "b"))
datadir = "objects"
# Pretend that some time passed between each partition
with mock.patch('os.stat') as mock_stat, \
mock_check_drive(isdir=True):
mock_stat.return_value.st_mtime = time() - 60
# Auditor starts, there are two partitions to check
gen = diskfile.object_audit_location_generator(tmpdir,
datadir,
False)
next(gen)
next(gen)
            # Auditor stopped for some reason without raising StopIteration
            # in the generator and restarts. There is now only one remaining
            # partition to check.
gen = diskfile.object_audit_location_generator(tmpdir, datadir,
False)
with mock_check_drive(isdir=True):
next(gen)
# There are no more remaining partitions
self.assertRaises(StopIteration, next, gen)
# There are no partitions to check if the auditor restarts another
# time and the status files have not been cleared
gen = diskfile.object_audit_location_generator(tmpdir, datadir,
False)
with mock_check_drive(isdir=True):
self.assertRaises(StopIteration, next, gen)
# Reset status file
diskfile.clear_auditor_status(tmpdir, datadir)
# If the auditor restarts another time, we expect to
# check two partitions again, because the remaining
# partitions were empty and a new listdir was executed
gen = diskfile.object_audit_location_generator(tmpdir, datadir,
False)
with mock_check_drive(isdir=True):
next(gen)
next(gen)
def test_update_auditor_status_throttle(self):
        # With a lot of nearly empty partitions, update_auditor_status would
        # write the status file many times a second, creating unexpectedly
        # high write load. This test ensures the status file is written at
        # most once a minute.
with temptree([]) as tmpdir:
os.makedirs(os.path.join(tmpdir, "sdf", "objects", "1", "a", "b"))
with mock.patch('swift.obj.diskfile.open') as mock_open:
# File does not exist yet - write expected
update_auditor_status(tmpdir, None, ['42'], "ALL")
self.assertEqual(1, mock_open.call_count)
mock_open.reset_mock()
# File exists, updated just now - no write expected
with mock.patch('os.stat') as mock_stat:
mock_stat.return_value.st_mtime = time()
update_auditor_status(tmpdir, None, ['42'], "ALL")
self.assertEqual(0, mock_open.call_count)
mock_open.reset_mock()
# File exists, updated just now, but empty partition list. This
# is a finalizing call, write expected
with mock.patch('os.stat') as mock_stat:
mock_stat.return_value.st_mtime = time()
update_auditor_status(tmpdir, None, [], "ALL")
self.assertEqual(1, mock_open.call_count)
mock_open.reset_mock()
# File updated more than 60 seconds ago - write expected
with mock.patch('os.stat') as mock_stat:
mock_stat.return_value.st_mtime = time() - 61
update_auditor_status(tmpdir, None, ['42'], "ALL")
self.assertEqual(1, mock_open.call_count)
class TestDiskFileRouter(unittest.TestCase):
@patch_policies(test_policies)
def test_policy(self):
conf = {}
logger = debug_logger('test-' + self.__class__.__name__)
df_router = diskfile.DiskFileRouter(conf, logger)
manager_0 = df_router[POLICIES[0]]
self.assertTrue(isinstance(manager_0, diskfile.DiskFileManager))
manager_1 = df_router[POLICIES[1]]
self.assertTrue(isinstance(manager_1, diskfile.ECDiskFileManager))
# The DiskFileRouter should not have to load the policy again
with mock.patch('swift.common.storage_policy.BaseStoragePolicy.' +
'get_diskfile_manager') as mock_load:
manager_3 = df_router[POLICIES[0]]
mock_load.assert_not_called()
self.assertIs(manager_3, manager_0)
self.assertTrue(isinstance(manager_3, diskfile.DiskFileManager))
def test_invalid_policy_config(self):
        # verify that invalid policy diskfile configs are detected when the
        # DiskFileRouter is created
bad_policy = StoragePolicy(0, name='zero', is_default=True,
diskfile_module='erasure_coding.fs')
with patch_policies([bad_policy]):
with self.assertRaises(PolicyError) as cm:
diskfile.DiskFileRouter({}, debug_logger())
self.assertIn('Invalid diskfile_module erasure_coding.fs',
str(cm.exception))
bad_policy = ECStoragePolicy(0, name='one', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
diskfile_module='replication.fs')
with patch_policies([bad_policy]):
with self.assertRaises(PolicyError) as cm:
diskfile.DiskFileRouter({}, debug_logger())
self.assertIn('Invalid diskfile_module replication.fs',
str(cm.exception))
bad_policy = StoragePolicy(0, name='zero', is_default=True,
diskfile_module='thin_air.fs')
with patch_policies([bad_policy]):
with self.assertRaises(PolicyError) as cm:
diskfile.DiskFileRouter({}, debug_logger())
self.assertIn('Unable to load diskfile_module thin_air.fs',
str(cm.exception))
class BaseDiskFileTestMixin(object):
"""
Bag of helpers that are useful in the per-policy DiskFile test classes,
plus common setUp and tearDown methods.
"""
# set mgr_cls on subclasses
mgr_cls = None
def setUp(self):
skip_if_no_xattrs()
self.tmpdir = mkdtemp()
self.testdir = os.path.join(
self.tmpdir, 'tmp_test_obj_server_DiskFile')
self.existing_device = 'sda1'
self.existing_device2 = 'sda2'
for policy in POLICIES:
mkdirs(os.path.join(self.testdir, self.existing_device,
diskfile.get_tmp_dir(policy)))
mkdirs(os.path.join(self.testdir, self.existing_device2,
diskfile.get_tmp_dir(policy)))
self._orig_tpool_exc = tpool.execute
tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
self.conf = dict(devices=self.testdir, mount_check='false',
keep_cache_size=2 * 1024, mb_per_sync=1)
self.logger = debug_logger('test-' + self.__class__.__name__)
self.df_mgr = self.mgr_cls(self.conf, self.logger)
self.df_router = diskfile.DiskFileRouter(self.conf, self.logger)
self._ts_iter = (Timestamp(t) for t in
itertools.count(int(time())))
def tearDown(self):
rmtree(self.tmpdir, ignore_errors=True)
tpool.execute = self._orig_tpool_exc
def _manager_mock(self, manager_attribute_name, df=None):
mgr_cls = df._manager.__class__ if df else self.mgr_cls
return '.'.join([
mgr_cls.__module__, mgr_cls.__name__, manager_attribute_name])
class DiskFileManagerMixin(BaseDiskFileTestMixin):
"""
Abstract test method mixin for concrete test cases - this class
won't get picked up by test runners because it doesn't subclass
unittest.TestCase and doesn't have [Tt]est in the name.
"""
def _get_diskfile(self, policy, frag_index=None, **kwargs):
df_mgr = self.df_router[policy]
return df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy, frag_index=frag_index,
**kwargs)
def test_init(self):
for policy in POLICIES:
df_router = diskfile.DiskFileRouter({}, self.logger)
df_mgr = df_router[policy]
self.assertEqual('/srv/node', df_mgr.devices)
self.assertEqual(604800, df_mgr.reclaim_age)
self.assertEqual(60.0, df_mgr.commit_window)
self.assertTrue(df_mgr.mount_check)
for policy in POLICIES:
conf = dict(devices=self.testdir,
mount_check='false',
reclaim_age=1000,
commit_window=10.1)
df_router = diskfile.DiskFileRouter(conf, self.logger)
df_mgr = df_router[policy]
self.assertEqual(self.testdir, df_mgr.devices)
self.assertEqual(1000, df_mgr.reclaim_age)
self.assertEqual(10.1, df_mgr.commit_window)
self.assertFalse(df_mgr.mount_check)
def test_init_commit_window(self):
def assert_ok(value, expected):
for policy in POLICIES:
conf = {'commit_window': value}
df_mgr = diskfile.DiskFileRouter(conf, self.logger)[policy]
self.assertEqual(expected, df_mgr.commit_window)
assert_ok(10.1, 10.1)
assert_ok('10.1', 10.1)
assert_ok(0, 0.0)
def assert_invalid(value):
for policy in POLICIES:
conf = {'commit_window': value}
with self.assertRaises(ValueError):
diskfile.DiskFileRouter(conf, self.logger)[policy]
assert_invalid(-1.1)
assert_invalid('-1.1')
assert_invalid('auto')
def test_cleanup_uses_configured_reclaim_age(self):
# verify that the reclaim_age used when cleaning up tombstones is
# either the default or the configured value
def do_test(ts, expect_reclaim):
for policy in POLICIES:
self.df_router = diskfile.DiskFileRouter(
self.conf, self.logger)
df = self._get_diskfile(policy)
df.delete(ts.internal)
tombstone_file = os.path.join(df._datadir, ts.internal + '.ts')
# cleanup_ondisk_files always uses the configured value
df._manager.cleanup_ondisk_files(
os.path.dirname(tombstone_file))
self.assertNotEqual(
expect_reclaim, os.path.exists(tombstone_file))
# reclaim_age not configured so default should be used
do_test(Timestamp(time() - diskfile.DEFAULT_RECLAIM_AGE - 1), True)
do_test(Timestamp(time() - diskfile.DEFAULT_RECLAIM_AGE + 100), False)
# reclaim_age configured value should be used
self.conf['reclaim_age'] = 1000
do_test(Timestamp(time() - diskfile.DEFAULT_RECLAIM_AGE + 100), True)
do_test(Timestamp(time() - 1001), True)
do_test(Timestamp(time() + 100), False)
def _test_get_ondisk_files(self, scenarios, policy,
frag_index=None, **kwargs):
class_under_test = self._get_diskfile(
policy, frag_index=frag_index, **kwargs)
for test in scenarios:
# test => [('filename.ext', '.ext'|False, ...), ...]
expected = {
ext[1:] + '_file': os.path.join(
class_under_test._datadir, filename)
for (filename, ext) in [v[:2] for v in test]
if ext in ('.data', '.meta', '.ts')}
# list(zip(...)) for py3 compatibility (zip is lazy there)
files = list(list(zip(*test))[0])
for _order in ('ordered', 'shuffled', 'shuffled'):
class_under_test = self._get_diskfile(
policy, frag_index=frag_index, **kwargs)
try:
actual = class_under_test._get_ondisk_files(files)
self._assertDictContainsSubset(
expected, actual,
'Expected %s from %s but got %s'
% (expected, files, actual))
except AssertionError as e:
self.fail('%s with files %s' % (str(e), files))
shuffle(files)
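    # Each scenario lists (filename, ext-or-False[, survives]) tuples; a file
    # is expected to remain after cleanup when the third element (or, if
    # absent, the second) is truthy.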
def _test_cleanup_ondisk_files(self, scenarios, policy,
reclaim_age=None, commit_window=None):
# check that expected files are left in hashdir after cleanup
for test in scenarios:
class_under_test = self.df_router[policy]
# list(zip(...)) for py3 compatibility (zip is lazy there)
files = list(list(zip(*test))[0])
hashdir = os.path.join(self.testdir, str(uuid.uuid4()))
os.mkdir(hashdir)
for fname in files:
open(os.path.join(hashdir, fname), 'w')
expected_after_cleanup = set([f[0] for f in test
if (f[2] if len(f) > 2 else f[1])])
if commit_window is not None:
class_under_test.commit_window = commit_window
if reclaim_age:
class_under_test.reclaim_age = reclaim_age
class_under_test.cleanup_ondisk_files(hashdir)
else:
with mock.patch('swift.obj.diskfile.time') as mock_time:
# don't reclaim anything
mock_time.time.return_value = 0.0
class_under_test.cleanup_ondisk_files(hashdir)
if expected_after_cleanup:
after_cleanup = set(os.listdir(hashdir))
errmsg = "expected %r, got %r for test %r" % (
sorted(expected_after_cleanup), sorted(after_cleanup), test
)
self.assertEqual(expected_after_cleanup, after_cleanup, errmsg)
else:
self.assertFalse(os.path.exists(hashdir))
def _test_yield_hashes_cleanup(self, scenarios, policy):
        # opportunistic test to check that yield_hashes cleans up the dir
        # using the same scenarios as passed to _test_cleanup_ondisk_files
for test in scenarios:
class_under_test = self.df_router[policy]
# list(zip(...)) for py3 compatibility (zip is lazy there)
files = list(list(zip(*test))[0])
dev_path = os.path.join(self.testdir, str(uuid.uuid4()))
hashdir = os.path.join(
dev_path, diskfile.get_data_dir(policy),
'0', 'abc', '9373a92d072897b136b3fc06595b4abc')
os.makedirs(hashdir)
for fname in files:
open(os.path.join(hashdir, fname), 'w')
expected_after_cleanup = set([f[0] for f in test
if f[1] or len(f) > 2 and f[2]])
with mock.patch('swift.obj.diskfile.time') as mock_time:
# don't reclaim anything
mock_time.time.return_value = 0.0
mocked = 'swift.obj.diskfile.BaseDiskFileManager.get_dev_path'
with mock.patch(mocked) as mock_path:
mock_path.return_value = dev_path
for _ in class_under_test.yield_hashes(
'ignored', '0', policy, suffixes=['abc']):
# return values are tested in test_yield_hashes_*
pass
if expected_after_cleanup:
after_cleanup = set(os.listdir(hashdir))
errmsg = "expected %r, got %r for test %r" % (
sorted(expected_after_cleanup), sorted(after_cleanup), test
)
self.assertEqual(expected_after_cleanup, after_cleanup, errmsg)
else:
self.assertFalse(os.path.exists(hashdir))
def test_get_ondisk_files_with_empty_dir(self):
files = []
expected = dict(
data_file=None, meta_file=None, ctype_file=None, ts_file=None)
for policy in POLICIES:
for frag_index in (0, None, '13'):
# check manager
df_mgr = self.df_router[policy]
datadir = os.path.join('/srv/node/sdb1/',
diskfile.get_data_dir(policy))
actual = df_mgr.get_ondisk_files(files, datadir)
self._assertDictContainsSubset(expected, actual)
# check diskfile under the hood
df = self._get_diskfile(policy, frag_index=frag_index)
actual = df._get_ondisk_files(files)
self._assertDictContainsSubset(expected, actual)
# check diskfile open
self.assertRaises(DiskFileNotExist, df.open)
def test_get_ondisk_files_with_unexpected_file(self):
unexpected_files = ['junk', 'junk.data', '.junk']
timestamp = next(make_timestamp_iter())
tomb_file = timestamp.internal + '.ts'
for policy in POLICIES:
for unexpected in unexpected_files:
self.logger.clear()
files = [unexpected, tomb_file]
df_mgr = self.df_router[policy]
datadir = os.path.join('/srv/node/sdb1/',
diskfile.get_data_dir(policy))
results = df_mgr.get_ondisk_files(files, datadir)
expected = {'ts_file': os.path.join(datadir, tomb_file)}
self._assertDictContainsSubset(expected, results)
log_lines = df_mgr.logger.get_lines_for_level('warning')
self.assertTrue(
log_lines[0].startswith(
'Unexpected file %s'
% os.path.join(datadir, unexpected)))
def test_get_ondisk_files_no_rsync_temp_file_warning(self):
# get_ondisk_files logs no warnings for rsync temp files
class_under_test = self._get_diskfile(POLICIES[0])
files = [
'.1472017820.44503.data.QBYCYU', # rsync tempfile for a .data
'.total-bs.abcdef', # example of false positive
]
paths = [os.path.join(class_under_test._datadir, f) for f in files]
expected = {'unexpected': paths}
results = class_under_test._get_ondisk_files(files)
for k, v in expected.items():
self.assertEqual(results[k], v)
# no warnings
self.assertFalse(self.logger.get_lines_for_level('warning'))
# but we do get a debug!
lines = self.logger.get_lines_for_level('debug')
for path in paths:
expected_msg = 'Rsync tempfile: %s' % path
self.assertIn(expected_msg, lines)
def test_cleanup_ondisk_files_reclaim_non_data_files(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set or 'survives' is True, the filename
# should still be in the dir after cleanup.
much_older = Timestamp(time() - 2000).internal
older = Timestamp(time() - 1001).internal
newer = Timestamp(time() - 900).internal
scenarios = [
[('%s.ts' % older, False, False)],
# fresh tombstone is preserved
[('%s.ts' % newer, '.ts', True)],
# tombstone reclaimed despite junk file
[('junk', False, True),
('%s.ts' % much_older, '.ts', False)],
# fresh .meta not reclaimed even if isolated
[('%s.meta' % newer, '.meta')],
# fresh .meta not reclaimed when tombstone is reclaimed
[('%s.meta' % newer, '.meta'),
('%s.ts' % older, False, False)],
# stale isolated .meta is reclaimed
[('%s.meta' % older, False, False)],
# stale .meta is reclaimed along with tombstone
[('%s.meta' % older, False, False),
('%s.ts' % older, False, False)]]
self._test_cleanup_ondisk_files(scenarios, POLICIES.default,
reclaim_age=1000, commit_window=0)
def test_construct_dev_path(self):
res_path = self.df_mgr.construct_dev_path('abc')
self.assertEqual(os.path.join(self.df_mgr.devices, 'abc'), res_path)
def test_pickle_async_update(self):
self.df_mgr.logger.increment = mock.MagicMock()
ts = Timestamp(10000.0).internal
with mock.patch('swift.obj.diskfile.write_pickle') as wp:
self.df_mgr.pickle_async_update(self.existing_device,
'a', 'c', 'o',
dict(a=1, b=2), ts, POLICIES[0])
dp = self.df_mgr.construct_dev_path(self.existing_device)
ohash = diskfile.hash_path('a', 'c', 'o')
wp.assert_called_with({'a': 1, 'b': 2},
os.path.join(
dp, diskfile.get_async_dir(POLICIES[0]),
ohash[-3:], ohash + '-' + ts),
os.path.join(dp, 'tmp'))
self.df_mgr.logger.increment.assert_called_with('async_pendings')
def test_object_audit_location_generator(self):
locations = list(
self.df_mgr.object_audit_location_generator(POLICIES[0]))
self.assertEqual(locations, [])
def test_replication_one_per_device_deprecation(self):
conf = dict(**self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 1)
conf = dict(replication_concurrency_per_device='0', **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 0)
conf = dict(replication_concurrency_per_device='2', **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 2)
conf = dict(replication_concurrency_per_device=2, **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 2)
# Check backward compatibility
conf = dict(replication_one_per_device='true', **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 1)
log_lines = mgr.logger.get_lines_for_level('warning')
self.assertIn('replication_one_per_device is deprecated',
log_lines[-1])
conf = dict(replication_one_per_device='false', **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 0)
log_lines = mgr.logger.get_lines_for_level('warning')
self.assertIn('replication_one_per_device is deprecated',
log_lines[-1])
# If defined, new parameter has precedence
conf = dict(replication_concurrency_per_device='2',
replication_one_per_device='true', **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 2)
log_lines = mgr.logger.get_lines_for_level('warning')
self.assertIn('replication_one_per_device ignored',
log_lines[-1])
conf = dict(replication_concurrency_per_device='2',
replication_one_per_device='false', **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 2)
log_lines = mgr.logger.get_lines_for_level('warning')
self.assertIn('replication_one_per_device ignored',
log_lines[-1])
conf = dict(replication_concurrency_per_device='0',
replication_one_per_device='true', **self.conf)
mgr = diskfile.DiskFileManager(conf, self.logger)
self.assertEqual(mgr.replication_concurrency_per_device, 0)
log_lines = mgr.logger.get_lines_for_level('warning')
self.assertIn('replication_one_per_device ignored',
log_lines[-1])
def test_replication_lock_on(self):
# Double check settings
self.df_mgr.replication_concurrency_per_device = 1
self.df_mgr.replication_lock_timeout = 0.1
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
with self.assertRaises(ReplicationLockTimeout):
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '2'):
success = True
self.assertFalse(success)
def test_replication_lock_off(self):
# Double check settings
self.df_mgr.replication_concurrency_per_device = 0
self.df_mgr.replication_lock_timeout = 0.1
# 2 locks must succeed
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
try:
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '2'):
success = True
except ReplicationLockTimeout as err:
self.fail('Unexpected exception: %s' % err)
self.assertTrue(success)
# 3 locks must succeed
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '2'):
try:
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '3'):
success = True
except ReplicationLockTimeout as err:
self.fail('Unexpected exception: %s' % err)
self.assertTrue(success)
def test_replication_lock_2(self):
# Double check settings
self.df_mgr.replication_concurrency_per_device = 2
self.df_mgr.replication_lock_timeout = 0.1
# 2 locks with replication_concurrency_per_device=2 must succeed
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
try:
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '2'):
success = True
except ReplicationLockTimeout as err:
self.fail('Unexpected exception: %s' % err)
self.assertTrue(success)
# 3 locks with replication_concurrency_per_device=2 must fail
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '2'):
with self.assertRaises(ReplicationLockTimeout):
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '3'):
success = True
self.assertFalse(success)
def test_replication_lock_another_device_fine(self):
# Double check settings
self.df_mgr.replication_concurrency_per_device = 1
self.df_mgr.replication_lock_timeout = 0.1
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
try:
with self.df_mgr.replication_lock(self.existing_device2,
POLICIES.legacy, '2'):
success = True
except ReplicationLockTimeout as err:
self.fail('Unexpected exception: %s' % err)
self.assertTrue(success)
def test_replication_lock_same_partition(self):
# Double check settings
self.df_mgr.replication_concurrency_per_device = 2
self.df_mgr.replication_lock_timeout = 0.1
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
with self.assertRaises(PartitionLockTimeout):
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
success = True
self.assertFalse(success)
def test_partition_lock_same_partition(self):
# Double check settings
self.df_mgr.replication_lock_timeout = 0.1
success = False
with self.df_mgr.partition_lock(self.existing_device,
POLICIES.legacy, '1', name='foo'):
with self.assertRaises(PartitionLockTimeout):
with self.df_mgr.partition_lock(self.existing_device,
POLICIES.legacy, '1',
name='foo'):
success = True
self.assertFalse(success)
def test_partition_lock_same_partition_different_name(self):
# Double check settings
self.df_mgr.replication_lock_timeout = 0.1
success = False
with self.df_mgr.partition_lock(self.existing_device,
POLICIES.legacy, '1', name='foo'):
with self.df_mgr.partition_lock(self.existing_device,
POLICIES.legacy, '1',
name='bar'):
success = True
self.assertTrue(success)
def test_partition_lock_and_replication_lock_same_partition(self):
# Double check settings
self.df_mgr.replication_lock_timeout = 0.1
success = False
with self.df_mgr.partition_lock(self.existing_device,
POLICIES.legacy, '1',
name='replication'):
with self.assertRaises(PartitionLockTimeout):
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
success = True
self.assertFalse(success)
success = False
with self.df_mgr.replication_lock(self.existing_device,
POLICIES.legacy, '1'):
with self.assertRaises(PartitionLockTimeout):
with self.df_mgr.partition_lock(self.existing_device,
POLICIES.legacy, '1',
name='replication'):
success = True
self.assertFalse(success)
def test_missing_splice_warning(self):
with mock.patch('swift.common.splice.splice._c_splice', None):
self.conf['splice'] = 'yes'
mgr = diskfile.DiskFileManager(self.conf, logger=self.logger)
warnings = self.logger.get_lines_for_level('warning')
self.assertGreater(len(warnings), 0)
self.assertTrue('splice()' in warnings[-1])
self.assertFalse(mgr.use_splice)
def test_get_diskfile_from_hash_dev_path_fail(self):
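        # get_diskfile_from_hash raises DiskFileDeviceUnavailable when the
        # device path cannot be resolved.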
self.df_mgr.get_dev_path = mock.MagicMock(return_value=None)
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
cleanup.return_value = {'files': ['1381679759.90941.data']}
readmeta.return_value = {'name': '/a/c/o'}
self.assertRaises(
DiskFileDeviceUnavailable,
self.df_mgr.get_diskfile_from_hash,
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
def test_get_diskfile_from_hash_not_dir(self):
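        # ENOTDIR from cleanup_ondisk_files quarantines the hash dir and is
        # reported as DiskFileNotExist.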
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta, \
mock.patch(self._manager_mock(
'quarantine_renamer')) as quarantine_renamer:
osexc = OSError()
osexc.errno = errno.ENOTDIR
cleanup.side_effect = osexc
readmeta.return_value = {'name': '/a/c/o'}
self.assertRaises(
DiskFileNotExist,
self.df_mgr.get_diskfile_from_hash,
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
quarantine_renamer.assert_called_once_with(
'/srv/dev/',
('/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900/' +
'made-up-filename'))
def test_get_diskfile_from_hash_no_data(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta, \
mock.patch(self._manager_mock(
'quarantine_renamer')) as quarantine_renamer:
osexc = OSError()
osexc.errno = errno.ENODATA
cleanup.side_effect = osexc
readmeta.return_value = {'name': '/a/c/o'}
self.assertRaises(
DiskFileNotExist,
self.df_mgr.get_diskfile_from_hash,
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
quarantine_renamer.assert_called_once_with(
'/srv/dev/',
('/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900/' +
'made-up-filename'))
def test_get_diskfile_from_hash_no_dir(self):
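        # ENOENT from cleanup_ondisk_files is reported as DiskFileNotExist.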
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
osexc = OSError()
osexc.errno = errno.ENOENT
cleanup.side_effect = osexc
readmeta.return_value = {'name': '/a/c/o'}
self.assertRaises(
DiskFileNotExist,
self.df_mgr.get_diskfile_from_hash,
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
def test_get_diskfile_from_hash_other_oserror(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
osexc = OSError()
cleanup.side_effect = osexc
readmeta.return_value = {'name': '/a/c/o'}
self.assertRaises(
OSError,
self.df_mgr.get_diskfile_from_hash,
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
def test_get_diskfile_from_hash_no_actual_files(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
cleanup.return_value = {'files': []}
readmeta.return_value = {'name': '/a/c/o'}
self.assertRaises(
DiskFileNotExist,
self.df_mgr.get_diskfile_from_hash,
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
def test_get_diskfile_from_hash_read_metadata_problem(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
cleanup.return_value = {'files': ['1381679759.90941.data']}
readmeta.side_effect = EOFError()
self.assertRaises(
DiskFileNotExist,
self.df_mgr.get_diskfile_from_hash,
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
def test_get_diskfile_from_hash_no_meta_name(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
cleanup.return_value = {'files': ['1381679759.90941.data']}
readmeta.return_value = {}
try:
self.df_mgr.get_diskfile_from_hash(
'dev', '9', '9a7175077c01a23ade5956b8a2bba900',
POLICIES[0])
except DiskFileNotExist as err:
exc = err
self.assertEqual(str(exc), '')
def test_get_diskfile_from_hash_bad_meta_name(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
with mock.patch(self._manager_mock('diskfile_cls')), \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
cleanup.return_value = {'files': ['1381679759.90941.data']}
readmeta.return_value = {'name': 'bad'}
try:
self.df_mgr.get_diskfile_from_hash(
'dev', '9', '9a7175077c01a23ade5956b8a2bba900',
POLICIES[0])
except DiskFileNotExist as err:
exc = err
self.assertEqual(str(exc), '')
def test_get_diskfile_from_hash(self):
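        # Happy path: the diskfile is built from the account/container/object
        # parsed out of the on-disk metadata 'name' value.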
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
mock_return = object()
with mock.patch(self._manager_mock('diskfile_cls'),
return_value=mock_return) as dfclass, \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
cleanup.return_value = {'files': ['1381679759.90941.data']}
readmeta.return_value = {'name': '/a/c/o'}
actual = self.df_mgr.get_diskfile_from_hash(
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
dfclass.assert_called_once_with(
self.df_mgr, '/srv/dev/', '9',
'a', 'c', 'o', policy=POLICIES[0])
cleanup.assert_called_once_with(
'/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900')
readmeta.assert_called_once_with(
'/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900/'
'1381679759.90941.data')
self.assertEqual(mock_return, actual)
def test_get_diskfile_and_filenames_from_hash(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
mock_return = object()
with mock.patch(self._manager_mock('diskfile_cls'),
return_value=mock_return) as dfclass, \
mock.patch(self._manager_mock(
'cleanup_ondisk_files')) as cleanup, \
mock.patch('swift.obj.diskfile.read_metadata') as readmeta:
cleanup.return_value = {'files': ['1381679759.90941.data']}
readmeta.return_value = {'name': '/a/c/o'}
actual, names = self.df_mgr.get_diskfile_and_filenames_from_hash(
'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
dfclass.assert_called_once_with(
self.df_mgr, '/srv/dev/', '9',
'a', 'c', 'o', policy=POLICIES[0])
cleanup.assert_called_once_with(
'/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900')
readmeta.assert_called_once_with(
'/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900/'
'1381679759.90941.data')
self.assertEqual(mock_return, actual)
self.assertEqual(['1381679759.90941.data'], names)
def test_listdir_enoent(self):
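        # _listdir swallows ENOENT, returning an empty list without logging.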
oserror = OSError()
oserror.errno = errno.ENOENT
self.df_mgr.logger.error = mock.MagicMock()
with mock.patch('os.listdir', side_effect=oserror):
self.assertEqual(self.df_mgr._listdir('path'), [])
self.assertEqual(self.df_mgr.logger.error.mock_calls, [])
def test_listdir_other_oserror(self):
oserror = OSError()
self.df_mgr.logger.error = mock.MagicMock()
with mock.patch('os.listdir', side_effect=oserror):
self.assertEqual(self.df_mgr._listdir('path'), [])
self.df_mgr.logger.error.assert_called_once_with(
'ERROR: Skipping %r due to error with listdir attempt: %s',
'path', oserror)
def test_listdir(self):
self.df_mgr.logger.error = mock.MagicMock()
with mock.patch('os.listdir', return_value=['abc', 'def']):
self.assertEqual(self.df_mgr._listdir('path'), ['abc', 'def'])
self.assertEqual(self.df_mgr.logger.error.mock_calls, [])
def test_yield_suffixes_dev_path_fail(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value=None)
exc = None
try:
list(self.df_mgr.yield_suffixes(self.existing_device, '9', 0))
except DiskFileDeviceUnavailable as err:
exc = err
self.assertEqual(str(exc), '')
def test_yield_suffixes(self):
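        # Only entries that look like valid three-character hex suffixes are
        # yielded; 'ghi' and 'abcd' are skipped.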
self.df_mgr._listdir = mock.MagicMock(return_value=[
'abc', 'def', 'ghi', 'abcd', '012'])
dev = self.existing_device
self.assertEqual(
list(self.df_mgr.yield_suffixes(dev, '9', POLICIES[0])),
[(self.testdir + '/' + dev + '/objects/9/abc', 'abc'),
(self.testdir + '/' + dev + '/objects/9/def', 'def'),
(self.testdir + '/' + dev + '/objects/9/012', '012')])
def test_yield_hashes_dev_path_fail(self):
self.df_mgr.get_dev_path = mock.MagicMock(return_value=None)
exc = None
try:
list(self.df_mgr.yield_hashes(self.existing_device, '9',
POLICIES[0]))
except DiskFileDeviceUnavailable as err:
exc = err
self.assertEqual(str(exc), '')
def test_yield_hashes_empty(self):
def _listdir(path):
return []
with mock.patch('os.listdir', _listdir):
self.assertEqual(list(self.df_mgr.yield_hashes(
self.existing_device, '9', POLICIES[0])), [])
def test_yield_hashes_cleans_up_everything(self):
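        # Expired tombstones and their emptied hash dirs are reclaimed by
        # yield_hashes; the emptied suffix dirs are only removed by the next
        # get_hashes pass.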
the_time = [1525354555.657585]
def mock_time():
return the_time[0]
with mock.patch('time.time', mock_time):
# Make a couple of (soon-to-be-)expired tombstones
df1 = self.df_mgr.get_diskfile(
self.existing_device, 0, 'a', 'c', 'o1', POLICIES[0])
df1.delete(Timestamp(the_time[0]))
df1_hash = utils.hash_path('a', 'c', 'o1')
df1_suffix = df1_hash[-3:]
df2 = self.df_mgr.get_diskfile(
self.existing_device, 0, 'a', 'c', 'o2', POLICIES[0])
df2.delete(Timestamp(the_time[0] + 1))
df2_hash = utils.hash_path('a', 'c', 'o2')
df2_suffix = df2_hash[-3:]
# sanity checks
self.assertTrue(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df1_suffix, df1_hash,
"1525354555.65758.ts")))
self.assertTrue(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df2_suffix, df2_hash,
"1525354556.65758.ts")))
# Cache the hashes and expire the tombstones
self.df_mgr.get_hashes(self.existing_device, '0', [], POLICIES[0])
the_time[0] += 2 * self.df_mgr.reclaim_age
hashes = list(self.df_mgr.yield_hashes(
self.existing_device, '0', POLICIES[0]))
self.assertEqual(hashes, [])
# The tombstones are gone
self.assertFalse(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df1_suffix, df1_hash,
"1525354555.65758.ts")))
self.assertFalse(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df2_suffix, df2_hash,
"1525354556.65758.ts")))
# The empty hash dirs are gone
self.assertFalse(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df1_suffix, df1_hash)))
self.assertFalse(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df2_suffix, df2_hash)))
        # The empty suffix dirs and the partition are still there
self.assertTrue(os.path.isdir(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df1_suffix)))
self.assertTrue(os.path.isdir(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df2_suffix)))
        # but the suffixes are invalidated
part_dir = os.path.join(
self.testdir, self.existing_device, 'objects', '0')
invalidations_file = os.path.join(
part_dir, diskfile.HASH_INVALIDATIONS_FILE)
with open(invalidations_file) as f:
invalids = f.read().splitlines()
self.assertEqual(sorted((df1_suffix, df2_suffix)),
sorted(invalids)) # sanity
# next time get hashes runs
with mock.patch('time.time', mock_time):
hashes = self.df_mgr.get_hashes(
self.existing_device, '0', [], POLICIES[0])
self.assertEqual(hashes, {})
        # ... suffixes will get cleaned up
self.assertFalse(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df1_suffix)))
self.assertFalse(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df2_suffix)))
        # but really it's not diskfile's job to decide if a partition belongs
# on a node or not
self.assertTrue(os.path.isdir(os.path.join(
self.testdir, self.existing_device, 'objects', '0')))
def test_focused_yield_hashes_does_not_clean_up(self):
the_time = [1525354555.657585]
def mock_time():
return the_time[0]
with mock.patch('time.time', mock_time):
df = self.df_mgr.get_diskfile(
self.existing_device, 0, 'a', 'c', 'o', POLICIES[0])
df.delete(Timestamp(the_time[0]))
df_hash = utils.hash_path('a', 'c', 'o')
df_suffix = df_hash[-3:]
# sanity check
self.assertTrue(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0',
df_suffix, df_hash,
"1525354555.65758.ts")))
# Expire the tombstone
the_time[0] += 2 * self.df_mgr.reclaim_age
hashes = list(self.df_mgr.yield_hashes(
self.existing_device, '0', POLICIES[0],
suffixes=[df_suffix]))
self.assertEqual(hashes, [])
# The partition dir is still there. Since we didn't visit all the
# suffix dirs, we didn't learn whether or not the partition dir was
# empty.
self.assertTrue(os.path.exists(os.path.join(
self.testdir, self.existing_device, 'objects', '0')))
def test_yield_hashes_empty_suffixes(self):
def _listdir(path):
return []
with mock.patch('os.listdir', _listdir):
self.assertEqual(
list(self.df_mgr.yield_hashes(self.existing_device, '9',
POLICIES[0],
suffixes=['456'])), [])
def _check_yield_hashes(self, policy, suffix_map, expected, **kwargs):
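        # Helper: fake a partition's on-disk layout via a mocked os.listdir
        # and compare yield_hashes output with the expected mapping of
        # object hash -> timestamps.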
device = self.existing_device
part = '9'
part_path = os.path.join(
self.testdir, device, diskfile.get_data_dir(policy), part)
def _listdir(path):
if path == part_path:
return suffix_map.keys()
for suff, hash_map in suffix_map.items():
if path == os.path.join(part_path, suff):
return hash_map.keys()
for hash_, files in hash_map.items():
if path == os.path.join(part_path, suff, hash_):
return files
self.fail('Unexpected listdir of %r' % path)
expected_items = [
(hash_, timestamps)
for hash_, timestamps in expected.items()]
with mock.patch('os.listdir', _listdir), \
mock.patch('os.unlink'), \
mock.patch('os.rmdir'):
df_mgr = self.df_router[policy]
hash_items = list(df_mgr.yield_hashes(
device, part, policy, **kwargs))
expected = sorted(expected_items)
actual = sorted(hash_items)
# default list diff easiest to debug
self.assertEqual(expected, actual)
def test_yield_hashes_tombstones(self):
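        # Only the newest tombstone per object hash is reported, restricted
        # to the requested suffixes.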
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'27e': {
'1111111111111111111111111111127e': [
ts1.internal + '.ts'],
'2222222222222222222222222222227e': [
ts2.internal + '.ts'],
},
'd41': {
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaad41': []
},
'd98': {},
'00b': {
'3333333333333333333333333333300b': [
ts1.internal + '.ts',
ts2.internal + '.ts',
ts3.internal + '.ts',
]
},
'204': {
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbb204': [
ts3.internal + '.ts',
]
}
}
expected = {
'1111111111111111111111111111127e': {'ts_data': ts1.internal},
'2222222222222222222222222222227e': {'ts_data': ts2.internal},
'3333333333333333333333333333300b': {'ts_data': ts3.internal},
}
for policy in POLICIES:
self._check_yield_hashes(policy, suffix_map, expected,
suffixes=['27e', '00b'])
@patch_policies
class TestDiskFileManager(DiskFileManagerMixin, BaseTestCase):
mgr_cls = diskfile.DiskFileManager
def test_get_ondisk_files_with_repl_policy(self):
        # Each scenario specifies a list of (filename, extension, [survives])
        # tuples. If extension is set then that filename should be returned by
        # the method under test for that extension type. If the optional
        # 'survives' is True, the filename should still be in the dir after
        # cleanup.
scenarios = [[('0000000007.00000.data', '.data')],
[('0000000007.00000.ts', '.ts')],
# older tombstone is ignored
[('0000000007.00000.ts', '.ts'),
('0000000006.00000.ts', False)],
# older data is ignored
[('0000000007.00000.data', '.data'),
('0000000006.00000.data', False),
('0000000004.00000.ts', False)],
# newest meta trumps older meta
[('0000000009.00000.meta', '.meta'),
('0000000008.00000.meta', False),
('0000000007.00000.data', '.data'),
('0000000004.00000.ts', False)],
# meta older than data is ignored
[('0000000007.00000.data', '.data'),
('0000000006.00000.meta', False),
('0000000004.00000.ts', False)],
# meta without data is ignored
[('0000000007.00000.meta', False, True),
('0000000006.00000.ts', '.ts'),
('0000000004.00000.data', False)],
# tombstone trumps meta and data at same timestamp
[('0000000006.00000.meta', False),
('0000000006.00000.ts', '.ts'),
('0000000006.00000.data', False)],
]
self._test_get_ondisk_files(scenarios, POLICIES[0], None)
self._test_cleanup_ondisk_files(scenarios, POLICIES[0])
self._test_yield_hashes_cleanup(scenarios, POLICIES[0])
def test_get_ondisk_files_with_stray_meta(self):
# get_ondisk_files ignores a stray .meta file
class_under_test = self._get_diskfile(POLICIES[0])
files = ['0000000007.00000.meta']
with mock.patch('swift.obj.diskfile.os.listdir', lambda *args: files):
self.assertRaises(DiskFileNotExist, class_under_test.open)
def test_verify_ondisk_files(self):
# ._verify_ondisk_files should only return False if get_ondisk_files
# has produced a bad set of files due to a bug, so to test it we need
# to probe it directly.
mgr = self.df_router[POLICIES.default]
ok_scenarios = (
{'ts_file': None, 'data_file': None, 'meta_file': None},
{'ts_file': None, 'data_file': 'a_file', 'meta_file': None},
{'ts_file': None, 'data_file': 'a_file', 'meta_file': 'a_file'},
{'ts_file': 'a_file', 'data_file': None, 'meta_file': None},
)
for scenario in ok_scenarios:
self.assertTrue(mgr._verify_ondisk_files(scenario),
'Unexpected result for scenario %s' % scenario)
# construct every possible invalid combination of results
vals = (None, 'a_file')
for ts_file, data_file, meta_file in [
(a, b, c) for a in vals for b in vals for c in vals]:
scenario = {
'ts_file': ts_file,
'data_file': data_file,
'meta_file': meta_file}
if scenario in ok_scenarios:
continue
self.assertFalse(mgr._verify_ondisk_files(scenario),
'Unexpected result for scenario %s' % scenario)
def test_parse_on_disk_filename(self):
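        # parse_on_disk_filename extracts the timestamp and extension from
        # .meta/.data/.ts names, with and without timestamp offsets.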
mgr = self.df_router[POLICIES.default]
for ts in (Timestamp('1234567890.00001'),
Timestamp('1234567890.00001', offset=17)):
for ext in ('.meta', '.data', '.ts'):
fname = '%s%s' % (ts.internal, ext)
info = mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertEqual(ts, info['timestamp'])
self.assertEqual(ext, info['ext'])
def test_parse_on_disk_filename_errors(self):
mgr = self.df_router[POLICIES.default]
with self.assertRaises(DiskFileError) as cm:
mgr.parse_on_disk_filename('junk', POLICIES.default)
self.assertEqual("Invalid Timestamp value in filename 'junk'",
str(cm.exception))
def test_cleanup_ondisk_files_reclaim_with_data_files(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set or 'survives' is True, the filename
# should still be in the dir after cleanup.
much_older = Timestamp(time() - 2000).internal
older = Timestamp(time() - 1001).internal
newer = Timestamp(time() - 900).internal
scenarios = [
# .data files are not reclaimed, ever
[('%s.data' % older, '.data', True)],
[('%s.data' % newer, '.data', True)],
# ... and we could have a mixture of fresh and stale .data
[('%s.data' % newer, '.data', True),
('%s.data' % older, False, False)],
# tombstone reclaimed despite newer data
[('%s.data' % newer, '.data', True),
('%s.data' % older, False, False),
('%s.ts' % much_older, '.ts', False)],
# .meta not reclaimed if there is a .data file
[('%s.meta' % older, '.meta'),
('%s.data' % much_older, '.data')]]
self._test_cleanup_ondisk_files(scenarios, POLICIES.default,
reclaim_age=1000)
def test_yield_hashes(self):
old_ts = '1383180000.12345'
fresh_ts = Timestamp(time() - 10).internal
fresher_ts = Timestamp(time() - 1).internal
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
fresh_ts + '.ts'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
old_ts + '.data'],
'9373a92d072897b136b3fc06595b7456': [
fresh_ts + '.ts',
fresher_ts + '.data'],
},
'def': {},
}
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': fresh_ts},
'9373a92d072897b136b3fc06595b0456': {'ts_data': old_ts},
'9373a92d072897b136b3fc06595b7456': {'ts_data': fresher_ts},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
def test_yield_hashes_yields_meta_timestamp(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'abc': {
                # only the tombstone is yield/sync-able
'9333a92d072897b136b3fc06595b4abc': [
ts1.internal + '.ts',
ts2.internal + '.meta'],
# dangling .meta is not yielded because it cannot be sync'd
'9222a92d072897b136b3fc06595b4abc': [
ts3.internal + '.meta'],
},
'456': {
# only latest metadata timestamp
'9444a92d072897b136b3fc06595b0456': [
ts1.internal + '.data',
ts2.internal + '.meta',
ts3.internal + '.meta'],
# exemplary datadir with .meta
'9555a92d072897b136b3fc06595b7456': [
ts1.internal + '.data',
ts2.internal + '.meta'],
},
}
expected = {
'9333a92d072897b136b3fc06595b4abc':
{'ts_data': ts1},
'9444a92d072897b136b3fc06595b0456':
{'ts_data': ts1, 'ts_meta': ts3},
'9555a92d072897b136b3fc06595b7456':
{'ts_data': ts1, 'ts_meta': ts2},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
def test_yield_hashes_yields_content_type_timestamp(self):
hash_ = '9373a92d072897b136b3fc06595b4abc'
ts_iter = make_timestamp_iter()
ts0, ts1, ts2, ts3, ts4 = (next(ts_iter) for _ in range(5))
data_file = ts1.internal + '.data'
# no content-type delta
meta_file = ts2.internal + '.meta'
suffix_map = {'abc': {hash_: [data_file, meta_file]}}
expected = {hash_: {'ts_data': ts1,
'ts_meta': ts2}}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
# non-zero content-type delta
delta = ts3.raw - ts2.raw
meta_file = '%s-%x.meta' % (ts3.internal, delta)
suffix_map = {'abc': {hash_: [data_file, meta_file]}}
expected = {hash_: {'ts_data': ts1,
'ts_meta': ts3,
'ts_ctype': ts2}}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
# zero content-type delta
meta_file = '%s+0.meta' % ts3.internal
suffix_map = {'abc': {hash_: [data_file, meta_file]}}
expected = {hash_: {'ts_data': ts1,
'ts_meta': ts3,
'ts_ctype': ts3}}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
# content-type in second meta file
delta = ts3.raw - ts2.raw
meta_file1 = '%s-%x.meta' % (ts3.internal, delta)
meta_file2 = '%s.meta' % ts4.internal
suffix_map = {'abc': {hash_: [data_file, meta_file1, meta_file2]}}
expected = {hash_: {'ts_data': ts1,
'ts_meta': ts4,
'ts_ctype': ts2}}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
# obsolete content-type in second meta file, older than data file
delta = ts3.raw - ts0.raw
meta_file1 = '%s-%x.meta' % (ts3.internal, delta)
meta_file2 = '%s.meta' % ts4.internal
suffix_map = {'abc': {hash_: [data_file, meta_file1, meta_file2]}}
expected = {hash_: {'ts_data': ts1,
'ts_meta': ts4}}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
# obsolete content-type in second meta file, same time as data file
delta = ts3.raw - ts1.raw
meta_file1 = '%s-%x.meta' % (ts3.internal, delta)
meta_file2 = '%s.meta' % ts4.internal
suffix_map = {'abc': {hash_: [data_file, meta_file1, meta_file2]}}
expected = {hash_: {'ts_data': ts1,
'ts_meta': ts4}}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
def test_yield_hashes_suffix_filter(self):
# test again with limited suffixes
old_ts = '1383180000.12345'
fresh_ts = Timestamp(time() - 10).internal
fresher_ts = Timestamp(time() - 1).internal
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
fresh_ts + '.ts'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
old_ts + '.data'],
'9373a92d072897b136b3fc06595b7456': [
fresh_ts + '.ts',
fresher_ts + '.data'],
},
'def': {},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': old_ts},
'9373a92d072897b136b3fc06595b7456': {'ts_data': fresher_ts},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
suffixes=['456'])
def test_yield_hashes_fails_with_bad_ondisk_filesets(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
suffix_map = {
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '.data'],
'9373a92d072897b136b3fc06595ba456': [
ts1.internal + '.meta'],
},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1},
}
try:
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
self.fail('Expected AssertionError')
except AssertionError:
pass
def test_check_policy(self):
mock_policy = mock.MagicMock()
mock_policy.policy_type = REPL_POLICY
# sanity, DiskFileManager is ok with REPL_POLICY
diskfile.DiskFileManager.check_policy(mock_policy)
# DiskFileManager raises ValueError with EC_POLICY
mock_policy.policy_type = EC_POLICY
with self.assertRaises(ValueError) as cm:
diskfile.DiskFileManager.check_policy(mock_policy)
self.assertEqual('Invalid policy_type: %s' % EC_POLICY,
str(cm.exception))
@patch_policies(with_ec_default=True)
class TestECDiskFileManager(DiskFileManagerMixin, BaseTestCase):
mgr_cls = diskfile.ECDiskFileManager
def test_get_ondisk_files_with_ec_policy_and_legacy_durable(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set then that filename should be returned by
# the method under test for that extension type. If the optional
# 'survives' is True, the filename should still be in the dir after
# cleanup.
scenarios = [
# highest frag index is chosen by default
[('0000000007.00000.durable', '.durable'),
('0000000007.00000#1.data', '.data'),
('0000000007.00000#0.data', False, True)],
# data older than durable is ignored
[('0000000007.00000.durable', '.durable'),
('0000000007.00000#1.data', '.data'),
('0000000006.00000#1.data', False),
('0000000004.00000.ts', False)],
            # data older than durable is ignored, even if it's the only data
[('0000000007.00000.durable', False, False),
('0000000006.00000#1.data', False),
('0000000004.00000.ts', False)],
# newer meta trumps older meta
[('0000000009.00000.meta', '.meta'),
('0000000008.00000.meta', False),
('0000000007.00000.durable', '.durable'),
('0000000007.00000#14.data', '.data'),
('0000000004.00000.ts', False)],
# older meta is ignored
[('0000000007.00000.durable', '.durable'),
('0000000007.00000#14.data', '.data'),
('0000000006.00000.meta', False),
('0000000004.00000.ts', False)],
# tombstone trumps meta, data, durable at older timestamp
[('0000000006.00000.ts', '.ts'),
('0000000005.00000.meta', False),
('0000000004.00000.durable', False),
('0000000004.00000#0.data', False)],
# tombstone trumps meta, data, durable at same timestamp
[('0000000006.00000.meta', False),
('0000000006.00000.ts', '.ts'),
('0000000006.00000.durable', False),
('0000000006.00000#0.data', False)]
]
# these scenarios have same outcome regardless of whether any
# fragment preferences are specified
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None)
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None, frag_prefs=[])
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
self._test_yield_hashes_cleanup(scenarios, POLICIES.default)
# next scenarios have different outcomes dependent on whether a
# frag_prefs parameter is passed to diskfile constructor or not
scenarios = [
# data with no durable is ignored
[('0000000007.00000#0.data', False, True)],
# data newer than tombstone with no durable is ignored
[('0000000007.00000#0.data', False, True),
('0000000006.00000.ts', '.ts', True)],
# data newer than durable is ignored
[('0000000009.00000#2.data', False, True),
('0000000009.00000#1.data', False, True),
('0000000008.00000#3.data', False, True),
('0000000007.00000.durable', '.durable'),
('0000000007.00000#1.data', '.data'),
('0000000007.00000#0.data', False, True)],
            # data newer than durable is ignored, even if it's the only data
[('0000000008.00000#1.data', False, True),
('0000000007.00000.durable', False, False)],
# missing durable invalidates data, older meta deleted
[('0000000007.00000.meta', False, True),
('0000000006.00000#0.data', False, True),
('0000000005.00000.meta', False, False),
('0000000004.00000#1.data', False, True)]]
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None)
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
scenarios = [
# data with no durable is chosen
[('0000000007.00000#0.data', '.data', True)],
# data newer than tombstone with no durable is chosen
[('0000000007.00000#0.data', '.data', True),
('0000000006.00000.ts', False, True)],
# data newer than durable is chosen, older data preserved
[('0000000009.00000#2.data', '.data', True),
('0000000009.00000#1.data', False, True),
('0000000008.00000#3.data', False, True),
('0000000007.00000.durable', False, True),
('0000000007.00000#1.data', False, True),
('0000000007.00000#0.data', False, True)],
# data newer than durable chosen when its only data
[('0000000008.00000#1.data', '.data', True),
('0000000007.00000.durable', False, False)],
# data plus meta chosen without durable, older meta deleted
[('0000000007.00000.meta', '.meta', True),
('0000000006.00000#0.data', '.data', True),
('0000000005.00000.meta', False, False),
('0000000004.00000#1.data', False, True)]]
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None, frag_prefs=[])
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
def test_get_ondisk_files_with_ec_policy(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set then that filename should be returned by
# the method under test for that extension type. If the optional
# 'survives' is True, the filename should still be in the dir after
# cleanup.
scenarios = [[('0000000007.00000.ts', '.ts')],
[('0000000007.00000.ts', '.ts'),
('0000000006.00000.ts', False)],
# highest frag index is chosen by default
[('0000000007.00000#1#d.data', '.data'),
('0000000007.00000#0.data', False, True)],
# data older than durable is ignored
[('0000000007.00000#1#d.data', '.data'),
('0000000006.00000#1.data', False),
('0000000004.00000.ts', False)],
# newer meta trumps older meta
[('0000000009.00000.meta', '.meta'),
('0000000008.00000.meta', False),
('0000000007.00000#14#d.data', '.data'),
('0000000004.00000.ts', False)],
# older meta is ignored
[('0000000007.00000#14#d.data', '.data'),
('0000000006.00000.meta', False),
('0000000004.00000.ts', False)],
# tombstone trumps meta and data at older timestamp
[('0000000006.00000.ts', '.ts'),
('0000000005.00000.meta', False),
('0000000004.00000#0#d.data', False)],
# tombstone trumps meta and data at same timestamp
[('0000000006.00000.meta', False),
('0000000006.00000.ts', '.ts'),
('0000000006.00000#0#d.data', False)],
]
# these scenarios have same outcome regardless of whether any
# fragment preferences are specified
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None)
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None, frag_prefs=[])
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
self._test_yield_hashes_cleanup(scenarios, POLICIES.default)
# next scenarios have different outcomes dependent on whether a
# frag_prefs parameter is passed to diskfile constructor or not
scenarios = [
# non-durable is ignored
[('0000000007.00000#0.data', False, True)],
# non-durable data newer than tombstone is ignored
[('0000000007.00000#0.data', False, True),
('0000000006.00000.ts', '.ts', True)],
# data newer than durable data is ignored
[('0000000009.00000#2.data', False, True),
('0000000009.00000#1.data', False, True),
('0000000008.00000#3.data', False, True),
('0000000007.00000#1#d.data', '.data'),
('0000000007.00000#0#d.data', False, True)],
# non-durable data ignored, older meta deleted
[('0000000007.00000.meta', False, True),
('0000000006.00000#0.data', False, True),
('0000000005.00000.meta', False, False),
('0000000004.00000#1.data', False, True)]]
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None)
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
scenarios = [
# non-durable data is chosen
[('0000000007.00000#0.data', '.data', True)],
# non-durable data newer than tombstone is chosen
[('0000000007.00000#0.data', '.data', True),
('0000000006.00000.ts', False, True)],
# non-durable data newer than durable data is chosen, older data
# preserved
[('0000000009.00000#2.data', '.data', True),
('0000000009.00000#1.data', False, True),
('0000000008.00000#3.data', False, True),
('0000000007.00000#1#d.data', False, True),
('0000000007.00000#0#d.data', False, True)],
# non-durable data plus meta chosen, older meta deleted
[('0000000007.00000.meta', '.meta', True),
('0000000006.00000#0.data', '.data', True),
('0000000005.00000.meta', False, False),
('0000000004.00000#1.data', False, True)]]
self._test_get_ondisk_files(scenarios, POLICIES.default,
frag_index=None, frag_prefs=[])
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
def test_get_ondisk_files_with_ec_policy_and_frag_index_legacy(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set then that filename should be returned by
# the method under test for that extension type.
scenarios = [[('0000000007.00000#2.data', False, True),
('0000000007.00000#1.data', '.data'),
('0000000007.00000#0.data', False, True),
('0000000007.00000.durable', '.durable')],
# specific frag newer than durable is ignored
[('0000000007.00000#2.data', False, True),
('0000000007.00000#1.data', False, True),
('0000000007.00000#0.data', False, True),
('0000000006.00000.durable', False)],
# specific frag older than durable is ignored
[('0000000007.00000#2.data', False),
('0000000007.00000#1.data', False),
('0000000007.00000#0.data', False),
('0000000008.00000.durable', False)],
# specific frag older than newest durable is ignored
                     # even if it also has a durable
[('0000000007.00000#2.data', False),
('0000000007.00000#1.data', False),
('0000000007.00000.durable', False),
('0000000008.00000#0.data', False, True),
('0000000008.00000.durable', '.durable')],
# meta included when frag index is specified
[('0000000009.00000.meta', '.meta'),
('0000000007.00000#2.data', False, True),
('0000000007.00000#1.data', '.data'),
('0000000007.00000#0.data', False, True),
('0000000007.00000.durable', '.durable')],
# specific frag older than tombstone is ignored
[('0000000009.00000.ts', '.ts'),
('0000000007.00000#2.data', False),
('0000000007.00000#1.data', False),
('0000000007.00000#0.data', False),
('0000000007.00000.durable', False)],
# no data file returned if specific frag index missing
[('0000000007.00000#2.data', False, True),
('0000000007.00000#14.data', False, True),
('0000000007.00000#0.data', False, True),
('0000000007.00000.durable', '.durable')],
# meta ignored if specific frag index missing
[('0000000008.00000.meta', False, True),
('0000000007.00000#14.data', False, True),
('0000000007.00000#0.data', False, True),
('0000000007.00000.durable', '.durable')],
# meta ignored if no data files
                     # Note: this is anomalous: even though we are specifying
                     # a frag_index, get_ondisk_files will tolerate .meta with
                     # no .data
[('0000000088.00000.meta', False, True),
('0000000077.00000.durable', False, False)]
]
self._test_get_ondisk_files(scenarios, POLICIES.default, frag_index=1)
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
# scenarios for empty frag_prefs, meaning durable not required
scenarios = [
# specific frag newer than durable is chosen
[('0000000007.00000#2.data', False, True),
('0000000007.00000#1.data', '.data', True),
('0000000007.00000#0.data', False, True),
('0000000006.00000.durable', False, False)],
]
self._test_get_ondisk_files(scenarios, POLICIES.default, frag_index=1,
frag_prefs=[])
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
def test_get_ondisk_files_with_ec_policy_and_frag_index(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set then that filename should be returned by
# the method under test for that extension type.
scenarios = [[('0000000007.00000#2#d.data', False, True),
('0000000007.00000#1#d.data', '.data'),
('0000000007.00000#0#d.data', False, True)],
                     # specific frag index 1 is returned as long as one frag
                     # at that timestamp is durable
[('0000000007.00000#2.data', False, True),
('0000000007.00000#1.data', '.data', True),
('0000000007.00000#0#d.data', False, True)],
# specific frag newer than durable data is ignored
[('0000000007.00000#2.data', False, True),
('0000000007.00000#1.data', False, True),
('0000000007.00000#0.data', False, True),
('0000000006.00000#0#d.data', False, True)],
# specific frag older than durable data is ignored
[('0000000007.00000#2.data', False),
('0000000007.00000#1.data', False),
('0000000007.00000#0.data', False),
('0000000008.00000#0#d.data', False, True)],
# specific frag older than newest durable data is ignored
                     # even if it is durable
[('0000000007.00000#2#d.data', False),
('0000000007.00000#1#d.data', False),
('0000000008.00000#0#d.data', False, True)],
# meta included when frag index is specified
[('0000000009.00000.meta', '.meta'),
('0000000007.00000#2#d.data', False, True),
('0000000007.00000#1#d.data', '.data'),
('0000000007.00000#0#d.data', False, True)],
# specific frag older than tombstone is ignored
[('0000000009.00000.ts', '.ts'),
('0000000007.00000#2#d.data', False),
('0000000007.00000#1#d.data', False),
('0000000007.00000#0#d.data', False)],
# no data file returned if specific frag index missing
[('0000000007.00000#2#d.data', False, True),
('0000000007.00000#14#d.data', False, True),
('0000000007.00000#0#d.data', False, True)],
# meta ignored if specific frag index missing
[('0000000008.00000.meta', False, True),
('0000000007.00000#14#d.data', False, True),
('0000000007.00000#0#d.data', False, True)],
# meta ignored if no data files
                     # Note: this is anomalous: even though we are specifying
                     # a frag_index, get_ondisk_files will tolerate .meta with
                     # no .data
[('0000000088.00000.meta', False, True)]
]
self._test_get_ondisk_files(scenarios, POLICIES.default, frag_index=1)
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
# scenarios for empty frag_prefs, meaning durable not required
scenarios = [
# specific frag newer than durable is chosen
[('0000000007.00000#2.data', False, True),
('0000000007.00000#1.data', '.data', True),
('0000000007.00000#0.data', False, True)],
]
self._test_get_ondisk_files(scenarios, POLICIES.default, frag_index=1,
frag_prefs=[])
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
def test_get_ondisk_files_with_ec_policy_some_legacy(self):
# Test mixture of legacy durable files and durable data files that
# might somehow end up in the same object dir.
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set then that filename should be returned by
# the method under test for that extension type. If the optional
# 'survives' is True, the filename should still be in the dir after
# cleanup.
scenarios = [
# .durable at same timestamp is ok
[('0000000007.00000#1#d.data', '.data', True),
('0000000007.00000#0#d.data', False, True),
('0000000007.00000.durable', False, True)],
# .durable at same timestamp is ok with non durable wanted frag
[('0000000007.00000#1.data', '.data', True),
('0000000007.00000#0#d.data', False, True),
('0000000007.00000.durable', False, True)],
# older .durable file is cleaned up
[('0000000007.00000#1#d.data', '.data', True),
('0000000007.00000#0#d.data', False, True),
('0000000006.00000.durable', False, False)],
# older .durable does not interfere with non durable wanted frag
[('0000000007.00000#1.data', '.data', True),
('0000000007.00000#0#d.data', False, True),
('0000000006.00000.durable', False, False)],
# ...even if it has accompanying .data file
[('0000000007.00000#1.data', '.data', True),
('0000000007.00000#0#d.data', False, True),
('0000000006.00000#0.data', False, False),
('0000000006.00000.durable', False, False)],
# newer .durable file trumps older durable-data
[('0000000007.00000#1#d.data', False, False),
('0000000007.00000#0#d.data', False, False),
('0000000008.00000#1.data', '.data', True),
('0000000008.00000.durable', False, True)],
# newer .durable file with no .data trumps older durable-data
[('0000000007.00000#1#d.data', False, False),
('0000000007.00000#0#d.data', False, False),
('0000000008.00000.durable', False, False)],
]
self._test_get_ondisk_files(scenarios, POLICIES.default, frag_index=1)
self._test_cleanup_ondisk_files(scenarios, POLICIES.default)
self._test_yield_hashes_cleanup(scenarios, POLICIES.default)
def test_cleanup_ondisk_files_reclaim_with_data_files_legacy_durable(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set or 'survives' is True, the filename
# should still be in the dir after cleanup.
much_older = Timestamp(time() - 2000).internal
older = Timestamp(time() - 1001).internal
newer = Timestamp(time() - 900).internal
scenarios = [
# isolated legacy .durable is cleaned up immediately
[('%s.durable' % newer, False, False)],
# ...even when other older files are in dir
[('%s.durable' % older, False, False),
('%s.ts' % much_older, False, False)],
# isolated .data files are cleaned up when stale
# ...even when there is an older legacy durable
[('%s#2.data' % older, False, False),
('%s#4.data' % older, False, False),
('%s#2.data' % much_older, '.data', True),
('%s#4.data' % much_older, False, True),
('%s.durable' % much_older, '.durable', True)],
# tombstone reclaimed despite much older legacy durable
[('%s.ts' % older, '.ts', False),
('%s.durable' % much_older, False, False)],
# .meta not reclaimed if there is legacy durable data
[('%s.meta' % older, '.meta', True),
('%s#4.data' % much_older, False, True),
('%s.durable' % much_older, '.durable', True)],
# stale .meta reclaimed along with stale legacy .durable
[('%s.meta' % older, False, False),
('%s.durable' % much_older, False, False)]]
self._test_cleanup_ondisk_files(scenarios, POLICIES.default,
reclaim_age=1000, commit_window=0)
def test_cleanup_ondisk_files_reclaim_with_data_files(self):
# Each scenario specifies a list of (filename, extension, [survives])
# tuples. If extension is set or 'survives' is True, the filename
# should still be in the dir after cleanup.
much_older = Timestamp(time() - 2000).internal
older = Timestamp(time() - 1001).internal
newer = Timestamp(time() - 900).internal
scenarios = [
# isolated .data files are cleaned up when stale
[('%s#2.data' % older, False, False),
('%s#4.data' % older, False, False)],
# ...even when there is an older durable fileset
[('%s#2.data' % older, False, False),
('%s#4.data' % older, False, False),
('%s#2#d.data' % much_older, '.data', True),
('%s#4#d.data' % much_older, False, True)],
# ... but preserved if still fresh
[('%s#2.data' % newer, False, True),
('%s#4.data' % newer, False, True)],
# ... and we could have a mixture of fresh and stale .data
[('%s#2.data' % newer, False, True),
('%s#4.data' % older, False, False)],
# tombstone reclaimed despite newer non-durable data
[('%s#2.data' % newer, False, True),
('%s#4.data' % older, False, False),
('%s.ts' % much_older, '.ts', False)],
# tombstone reclaimed despite much older durable
[('%s.ts' % older, '.ts', False),
('%s#4#d.data' % much_older, False, False)],
# .meta not reclaimed if there is durable data
[('%s.meta' % older, '.meta', True),
('%s#4#d.data' % much_older, False, True)],
# stale .meta reclaimed along with stale non-durable .data
[('%s.meta' % older, False, False),
('%s#4.data' % much_older, False, False)]]
self._test_cleanup_ondisk_files(scenarios, POLICIES.default,
reclaim_age=1000, commit_window=0)
def test_cleanup_ondisk_files_commit_window(self):
# verify that non-durable files are not reclaimed regardless of
# timestamp if written to disk within commit_window
much_older = Timestamp(time() - 2000).internal
older = Timestamp(time() - 1001).internal
newer = Timestamp(time() - 900).internal
scenarios = [
# recently written nondurables not cleaned up
[('%s#1.data' % older, True),
('%s#2.data' % newer, True),
('%s.meta' % much_older, False),
('%s.ts' % much_older, False)]]
self._test_cleanup_ondisk_files(scenarios, POLICIES.default,
reclaim_age=1000, commit_window=60)
# ... but if commit_window is reduced then recently written files are
# cleaned up
scenarios = [
# older *timestamps* cleaned up
[('%s#1.data' % older, False),
('%s#2.data' % newer, True),
('%s.meta' % much_older, False),
('%s.ts' % much_older, False)]]
self._test_cleanup_ondisk_files(scenarios, POLICIES.default,
reclaim_age=1000, commit_window=0)
def test_get_ondisk_files_with_stray_meta(self):
# get_ondisk_files ignores a stray .meta file
class_under_test = self._get_diskfile(POLICIES.default)
@contextmanager
def create_files(df, files):
os.makedirs(df._datadir)
for fname in files:
fpath = os.path.join(df._datadir, fname)
with open(fpath, 'w') as f:
diskfile.write_metadata(f, {'name': df._name,
'Content-Length': 0})
yield
rmtree(df._datadir, ignore_errors=True)
# sanity
good_files = [
'0000000006.00000.meta',
'0000000006.00000#1#d.data'
]
with create_files(class_under_test, good_files):
class_under_test.open()
scenarios = [['0000000007.00000.meta'],
['0000000007.00000.meta',
'0000000006.00000.durable'], # legacy durable file
['0000000007.00000.meta',
'0000000006.00000#1.data'],
['0000000007.00000.meta',
'0000000006.00000.durable', # legacy durable file
'0000000005.00000#1.data']
]
for files in scenarios:
with create_files(class_under_test, files):
try:
class_under_test.open()
except DiskFileNotExist:
continue
self.fail('expected DiskFileNotExist opening %s with %r' % (
class_under_test.__class__.__name__, files))
# Simulate another process deleting the data after we list contents
# but before we actually open them
orig_listdir = os.listdir
def deleting_listdir(d):
result = orig_listdir(d)
for f in result:
os.unlink(os.path.join(d, f))
return result
with create_files(class_under_test, good_files), \
mock.patch('swift.obj.diskfile.os.listdir',
side_effect=deleting_listdir), \
self.assertRaises(DiskFileNotExist):
class_under_test.open()
def test_verify_ondisk_files(self):
# _verify_ondisk_files should only return False if get_ondisk_files
# has produced a bad set of files due to a bug, so to test it we need
# to probe it directly.
mgr = self.df_router[POLICIES.default]
ok_scenarios = (
{'ts_file': None, 'data_file': None, 'meta_file': None,
'durable_frag_set': None},
{'ts_file': None, 'data_file': 'a_file', 'meta_file': None,
'durable_frag_set': ['a_file']},
{'ts_file': None, 'data_file': 'a_file', 'meta_file': 'a_file',
'durable_frag_set': ['a_file']},
{'ts_file': 'a_file', 'data_file': None, 'meta_file': None,
'durable_frag_set': None},
)
for scenario in ok_scenarios:
self.assertTrue(mgr._verify_ondisk_files(scenario),
'Unexpected result for scenario %s' % scenario)
# construct every possible invalid combination of results
vals = (None, 'a_file')
for ts_file, data_file, meta_file, durable_frag in [
(a, b, c, d)
for a in vals for b in vals for c in vals for d in vals]:
scenario = {
'ts_file': ts_file,
'data_file': data_file,
'meta_file': meta_file,
'durable_frag_set': [durable_frag] if durable_frag else None}
if scenario in ok_scenarios:
continue
self.assertFalse(mgr._verify_ondisk_files(scenario),
'Unexpected result for scenario %s' % scenario)
def test_parse_on_disk_filename(self):
mgr = self.df_router[POLICIES.default]
for ts in (Timestamp('1234567890.00001'),
Timestamp('1234567890.00001', offset=17)):
# non-durable data file
for frag in (0, 2, 13):
fname = '%s#%s.data' % (ts.internal, frag)
info = mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertEqual(ts, info['timestamp'])
self.assertEqual('.data', info['ext'])
self.assertEqual(frag, info['frag_index'])
self.assertIs(False, info['durable'])
self.assertEqual(mgr.make_on_disk_filename(**info), fname)
# durable data file
for frag in (0, 2, 13):
fname = '%s#%s#d.data' % (ts.internal, frag)
info = mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertEqual(ts, info['timestamp'])
self.assertEqual('.data', info['ext'])
self.assertEqual(frag, info['frag_index'])
self.assertIs(True, info['durable'])
self.assertEqual(mgr.make_on_disk_filename(**info), fname)
            # a data file with an unexpected suffix marker is not an error,
            # in case alternative marker suffixes are added in future
for frag in (0, 2, 13):
fname = '%s#%s#junk.data' % (ts.internal, frag)
info = mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertEqual(ts, info['timestamp'])
self.assertEqual('.data', info['ext'])
self.assertEqual(frag, info['frag_index'])
self.assertIs(False, info['durable'])
expected = '%s#%s.data' % (ts.internal, frag)
self.assertEqual(mgr.make_on_disk_filename(**info), expected)
for ext in ('.meta', '.durable', '.ts'):
fname = '%s%s' % (ts.internal, ext)
info = mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertEqual(ts, info['timestamp'])
self.assertEqual(ext, info['ext'])
self.assertIsNone(info['frag_index'])
self.assertEqual(mgr.make_on_disk_filename(**info), fname)
def test_parse_on_disk_filename_errors(self):
mgr = self.df_router[POLICIES.default]
for ts in (Timestamp('1234567890.00001'),
Timestamp('1234567890.00001', offset=17)):
fname = '%s.data' % ts.internal
with self.assertRaises(DiskFileError) as cm:
mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertTrue(str(cm.exception).startswith("Bad fragment index"))
expected = {
'': 'bad',
'foo': 'bad',
'1.314': 'bad',
1.314: 'bad',
-2: 'negative',
'-2': 'negative',
None: 'bad',
'None': 'bad',
}
# non-durable data file
for frag, msg in expected.items():
fname = '%s#%s.data' % (ts.internal, frag)
with self.assertRaises(DiskFileError) as cm:
mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertIn(msg, str(cm.exception).lower())
# durable data file
for frag, msg in expected.items():
fname = '%s#%s#d.data' % (ts.internal, frag)
with self.assertRaises(DiskFileError) as cm:
mgr.parse_on_disk_filename(fname, POLICIES.default)
self.assertIn(msg, str(cm.exception).lower())
with self.assertRaises(DiskFileError) as cm:
mgr.parse_on_disk_filename('junk', POLICIES.default)
self.assertEqual("Invalid Timestamp value in filename 'junk'",
str(cm.exception))
def test_make_on_disk_filename(self):
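        # make_on_disk_filename and parse_on_disk_filename are inverses, both
        # for .data names (with frag index and durable marker) and for
        # .meta/.durable/.ts names.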
mgr = self.df_router[POLICIES.default]
for ts in (Timestamp('1234567890.00001'),
Timestamp('1234567890.00001', offset=17)):
for frag in (0, '0', 2, '2', 13, '13'):
for durable in (True, False):
expected = _make_datafilename(
ts, POLICIES.default, frag_index=frag, durable=durable)
actual = mgr.make_on_disk_filename(
ts, '.data', frag_index=frag, durable=durable)
self.assertEqual(expected, actual)
parsed = mgr.parse_on_disk_filename(
actual, POLICIES.default)
self.assertEqual(parsed, {
'timestamp': ts,
'frag_index': int(frag),
'ext': '.data',
'ctype_timestamp': None,
'durable': durable
})
# these functions are inverse
self.assertEqual(
mgr.make_on_disk_filename(**parsed),
expected)
for ext in ('.meta', '.durable', '.ts'):
expected = '%s%s' % (ts.internal, ext)
# frag index should not be required
actual = mgr.make_on_disk_filename(ts, ext)
self.assertEqual(expected, actual)
# frag index should be ignored
actual = mgr.make_on_disk_filename(
ts, ext, frag_index=frag)
self.assertEqual(expected, actual)
parsed = mgr.parse_on_disk_filename(
actual, POLICIES.default)
self.assertEqual(parsed, {
'timestamp': ts,
'frag_index': None,
'ext': ext,
'ctype_timestamp': None
})
# these functions are inverse
self.assertEqual(
mgr.make_on_disk_filename(**parsed),
expected)
actual = mgr.make_on_disk_filename(ts)
self.assertEqual(ts, actual)
def test_make_on_disk_filename_with_bad_frag_index(self):
mgr = self.df_router[POLICIES.default]
ts = Timestamp('1234567890.00001')
with self.assertRaises(DiskFileError):
# .data requires a frag_index kwarg
mgr.make_on_disk_filename(ts, '.data')
for frag in (None, 'foo', '1.314', 1.314, -2, '-2'):
with self.assertRaises(DiskFileError):
mgr.make_on_disk_filename(ts, '.data', frag_index=frag)
for ext in ('.meta', '.durable', '.ts'):
expected = '%s%s' % (ts.internal, ext)
# bad frag index should be ignored
actual = mgr.make_on_disk_filename(ts, ext, frag_index=frag)
self.assertEqual(expected, actual)
def test_make_on_disk_filename_for_meta_with_content_type(self):
# verify .meta filename encodes content-type timestamp
mgr = self.df_router[POLICIES.default]
time_ = 1234567890.00001
for delta in (0, 1, 111111):
t_meta = Timestamp(time_)
t_type = Timestamp(time_ - delta / 100000.)
sign = '-' if delta else '+'
expected = '%s%s%x.meta' % (t_meta.short, sign, delta)
actual = mgr.make_on_disk_filename(
t_meta, '.meta', ctype_timestamp=t_type)
self.assertEqual(expected, actual)
parsed = mgr.parse_on_disk_filename(actual, POLICIES.default)
self.assertEqual(parsed, {
'timestamp': t_meta,
'frag_index': None,
'ext': '.meta',
'ctype_timestamp': t_type
})
# these functions are inverse
self.assertEqual(
mgr.make_on_disk_filename(**parsed),
expected)
def test_yield_hashes_legacy_durable(self):
old_ts = Timestamp('1383180000.12345')
fresh_ts = Timestamp(time() - 10)
fresher_ts = Timestamp(time() - 1)
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
fresh_ts.internal + '.ts'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
old_ts.internal + '#2.data',
old_ts.internal + '.durable'],
'9373a92d072897b136b3fc06595b7456': [
fresh_ts.internal + '.ts',
fresher_ts.internal + '#2.data',
fresher_ts.internal + '.durable'],
},
'def': {},
}
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': fresh_ts},
'9373a92d072897b136b3fc06595b0456': {'ts_data': old_ts,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': fresher_ts,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes(self):
old_ts = Timestamp('1383180000.12345')
fresh_ts = Timestamp(time() - 10)
fresher_ts = Timestamp(time() - 1)
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
fresh_ts.internal + '.ts'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
old_ts.internal + '#2#d.data'],
'9373a92d072897b136b3fc06595b7456': [
fresh_ts.internal + '.ts',
fresher_ts.internal + '#2#d.data'],
},
'def': {},
}
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': fresh_ts},
'9373a92d072897b136b3fc06595b0456': {'ts_data': old_ts,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': fresher_ts,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes_yields_meta_timestamp_legacy_durable(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
ts1.internal + '.ts',
ts2.internal + '.meta'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2.data',
ts1.internal + '.durable',
ts2.internal + '.meta',
ts3.internal + '.meta'],
'9373a92d072897b136b3fc06595b7456': [
ts1.internal + '#2.data',
ts1.internal + '.durable',
ts2.internal + '.meta'],
},
}
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': ts1},
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'ts_meta': ts3,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': ts1,
'ts_meta': ts2,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
# but meta timestamp is *not* returned if specified frag index
# is not found
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': ts1},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=3)
def test_yield_hashes_yields_meta_timestamp(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
ts1.internal + '.ts',
ts2.internal + '.meta'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2#d.data',
ts2.internal + '.meta',
ts3.internal + '.meta'],
'9373a92d072897b136b3fc06595b7456': [
ts1.internal + '#2#d.data',
ts2.internal + '.meta'],
},
}
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': ts1},
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'ts_meta': ts3,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': ts1,
'ts_meta': ts2,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected)
# but meta timestamp is *not* returned if specified frag index
# is not found
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': ts1},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=3)
def test_yield_hashes_suffix_filter_legacy_durable(self):
# test again with limited suffixes
old_ts = '1383180000.12345'
fresh_ts = Timestamp(time() - 10).internal
fresher_ts = Timestamp(time() - 1).internal
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
fresh_ts + '.ts'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
old_ts + '#2.data',
old_ts + '.durable'],
'9373a92d072897b136b3fc06595b7456': [
fresh_ts + '.ts',
fresher_ts + '#2.data',
fresher_ts + '.durable'],
},
'def': {},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': old_ts,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': fresher_ts,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
suffixes=['456'], frag_index=2)
def test_yield_hashes_suffix_filter(self):
# test again with limited suffixes
old_ts = '1383180000.12345'
fresh_ts = Timestamp(time() - 10).internal
fresher_ts = Timestamp(time() - 1).internal
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
fresh_ts + '.ts'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
old_ts + '#2#d.data'],
'9373a92d072897b136b3fc06595b7456': [
fresh_ts + '.ts',
fresher_ts + '#2#d.data'],
},
'def': {},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': old_ts,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': fresher_ts,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
suffixes=['456'], frag_index=2)
def test_yield_hashes_skips_non_durable_data(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
suffix_map = {
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2#d.data'],
'9373a92d072897b136b3fc06595b7456': [
ts1.internal + '#2.data'],
},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
# if we add a durable it shows up
suffix_map['456']['9373a92d072897b136b3fc06595b7456'] = [
ts1.internal + '#2#d.data']
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': ts1,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes_optionally_yields_non_durable_data(self):
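        # with the default frag_prefs=None only durable frags are yielded;
        # passing an empty frag_prefs list allows newer non-durable frags to
        # be yielded in preference over older durable ones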
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
suffix_map = {
'abc': {
'9373a92d072897b136b3fc06595b4abc': [
ts1.internal + '#2#d.data',
ts2.internal + '#2.data'], # newer non-durable
'9373a92d072897b136b3fc06595b0abc': [
ts1.internal + '#2.data', # older non-durable
ts2.internal + '#2#d.data'],
},
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2#d.data'],
'9373a92d072897b136b3fc06595b7456': [
ts2.internal + '#2.data'],
},
}
# sanity check non-durables not yielded
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': ts1,
'durable': True},
'9373a92d072897b136b3fc06595b0abc': {'ts_data': ts2,
'durable': True},
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2, frag_prefs=None)
# an empty frag_prefs list is sufficient to get non-durables yielded
# (in preference over *older* durable)
expected = {
'9373a92d072897b136b3fc06595b4abc': {'ts_data': ts2,
'durable': False},
'9373a92d072897b136b3fc06595b0abc': {'ts_data': ts2,
'durable': True},
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': ts2,
'durable': False},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2, frag_prefs=[])
def test_yield_hashes_skips_missing_legacy_durable(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
suffix_map = {
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2.data',
ts1.internal + '.durable'],
'9373a92d072897b136b3fc06595b7456': [
ts1.internal + '#2.data'],
},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
# if we add a durable it shows up
suffix_map['456']['9373a92d072897b136b3fc06595b7456'].append(
ts1.internal + '.durable')
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
'9373a92d072897b136b3fc06595b7456': {'ts_data': ts1,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes_skips_newer_data_without_legacy_durable(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2.data',
ts1.internal + '.durable',
ts2.internal + '#2.data',
ts3.internal + '#2.data'],
},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=None)
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
# if we add a durable then newer data shows up
suffix_map['456']['9373a92d072897b136b3fc06595b0456'].append(
ts2.internal + '.durable')
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts2,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=None)
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes_skips_newer_non_durable_data(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2#d.data',
ts2.internal + '#2.data',
ts3.internal + '#2.data'],
},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=None)
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
# if we make it durable then newer data shows up
suffix_map = {
'456': {
'9373a92d072897b136b3fc06595b0456': [
ts1.internal + '#2#d.data',
ts2.internal + '#2#d.data',
ts3.internal + '#2.data'],
},
}
expected = {
'9373a92d072897b136b3fc06595b0456': {'ts_data': ts2,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=None)
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes_ignores_bad_ondisk_filesets(self):
# this differs from DiskFileManager.yield_hashes which will fail
# when encountering a bad on-disk file set
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
suffix_map = {
'456': {
# this one is fine
'9333a92d072897b136b3fc06595b0456': [
ts1.internal + '#2#d.data'],
# this one is fine, legacy durable
'9333a92d072897b136b3fc06595b1456': [
ts1.internal + '#2.data',
ts1.internal + '.durable'],
# missing frag index
'9444a92d072897b136b3fc06595b7456': [
ts1.internal + '.data'],
# junk
'9555a92d072897b136b3fc06595b8456': [
'junk_file'],
# not durable
'9666a92d072897b136b3fc06595b9456': [
ts1.internal + '#2.data',
ts2.internal + '.meta'],
# .meta files w/o .data files can't be opened, and are ignored
'9777a92d072897b136b3fc06595ba456': [
ts1.internal + '.meta'],
# multiple meta files with no data
'9888a92d072897b136b3fc06595bb456': [
ts1.internal + '.meta',
ts2.internal + '.meta'],
# this is good with meta
'9999a92d072897b136b3fc06595bb456': [
ts1.internal + '#2#d.data',
ts2.internal + '.meta'],
# this is good with meta, legacy durable
'9999a92d072897b136b3fc06595bc456': [
ts1.internal + '#2.data',
ts1.internal + '.durable',
ts2.internal + '.meta'],
# this one is wrong frag index
'9aaaa92d072897b136b3fc06595b0456': [
ts1.internal + '#7#d.data'],
# this one is wrong frag index, legacy durable
'9aaaa92d072897b136b3fc06595b1456': [
ts1.internal + '#7.data',
ts1.internal + '.durable'],
},
}
expected = {
'9333a92d072897b136b3fc06595b0456': {'ts_data': ts1,
'durable': True},
'9999a92d072897b136b3fc06595bb456': {'ts_data': ts1,
'ts_meta': ts2,
'durable': True},
'9333a92d072897b136b3fc06595b1456': {'ts_data': ts1,
'durable': True},
'9999a92d072897b136b3fc06595bc456': {'ts_data': ts1,
'ts_meta': ts2,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes_filters_frag_index(self):
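        # only hashes having a durable .data file for the requested frag
        # index (2) are yielded, and ts_data is the newest such data file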
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'27e': {
'1111111111111111111111111111127e': [
ts1.internal + '#2#d.data',
ts1.internal + '#3#d.data',
],
'2222222222222222222222222222227e': [
ts1.internal + '#2#d.data',
ts2.internal + '#2#d.data',
],
},
'd41': {
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaad41': [
ts1.internal + '#3#d.data',
],
},
'00b': {
'3333333333333333333333333333300b': [
ts1.internal + '#2.data',
ts2.internal + '#2.data',
ts3.internal + '#2#d.data',
],
},
}
expected = {
'1111111111111111111111111111127e': {'ts_data': ts1,
'durable': True},
'2222222222222222222222222222227e': {'ts_data': ts2,
'durable': True},
'3333333333333333333333333333300b': {'ts_data': ts3,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def test_yield_hashes_filters_frag_index_legacy_durable(self):
ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
ts1 = next(ts_iter)
ts2 = next(ts_iter)
ts3 = next(ts_iter)
suffix_map = {
'27e': {
'1111111111111111111111111111127e': [
ts1.internal + '#2.data',
ts1.internal + '#3.data',
ts1.internal + '.durable',
],
'2222222222222222222222222222227e': [
ts1.internal + '#2.data',
ts1.internal + '.durable',
ts2.internal + '#2.data',
ts2.internal + '.durable',
],
},
'd41': {
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaad41': [
ts1.internal + '#3.data',
ts1.internal + '.durable',
],
},
'00b': {
'3333333333333333333333333333300b': [
ts1.internal + '#2.data',
ts2.internal + '#2.data',
ts3.internal + '#2.data',
ts3.internal + '.durable',
],
},
}
expected = {
'1111111111111111111111111111127e': {'ts_data': ts1,
'durable': True},
'2222222222222222222222222222227e': {'ts_data': ts2,
'durable': True},
'3333333333333333333333333333300b': {'ts_data': ts3,
'durable': True},
}
self._check_yield_hashes(POLICIES.default, suffix_map, expected,
frag_index=2)
def _test_get_diskfile_from_hash_frag_index_filter(self, legacy_durable):
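        # write fragment archives with two different frag indexes into the
        # same object dir, then verify that get_diskfile_from_hash returns
        # the diskfile matching the requested frag_index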
df = self._get_diskfile(POLICIES.default)
hash_ = os.path.basename(df._datadir)
self.assertRaises(DiskFileNotExist,
self.df_mgr.get_diskfile_from_hash,
self.existing_device, '0', hash_,
POLICIES.default) # sanity
timestamp = Timestamp.now()
for frag_index in (4, 7):
write_diskfile(df, timestamp, frag_index=frag_index,
legacy_durable=legacy_durable)
df4 = self.df_mgr.get_diskfile_from_hash(
self.existing_device, '0', hash_, POLICIES.default, frag_index=4)
self.assertEqual(df4._frag_index, 4)
self.assertEqual(
df4.read_metadata()['X-Object-Sysmeta-Ec-Frag-Index'], '4')
df7 = self.df_mgr.get_diskfile_from_hash(
self.existing_device, '0', hash_, POLICIES.default, frag_index=7)
self.assertEqual(df7._frag_index, 7)
self.assertEqual(
df7.read_metadata()['X-Object-Sysmeta-Ec-Frag-Index'], '7')
def test_get_diskfile_from_hash_frag_index_filter(self):
self._test_get_diskfile_from_hash_frag_index_filter(False)
def test_get_diskfile_from_hash_frag_index_filter_legacy_durable(self):
self._test_get_diskfile_from_hash_frag_index_filter(True)
def test_check_policy(self):
mock_policy = mock.MagicMock()
mock_policy.policy_type = EC_POLICY
# sanity, ECDiskFileManager is ok with EC_POLICY
diskfile.ECDiskFileManager.check_policy(mock_policy)
# ECDiskFileManager raises ValueError with REPL_POLICY
mock_policy.policy_type = REPL_POLICY
with self.assertRaises(ValueError) as cm:
diskfile.ECDiskFileManager.check_policy(mock_policy)
self.assertEqual('Invalid policy_type: %s' % REPL_POLICY,
str(cm.exception))
class DiskFileMixin(BaseDiskFileTestMixin):
def ts(self):
"""
Timestamps - forever.
"""
return next(self._ts_iter)
def _create_ondisk_file(self, df, data, timestamp, metadata=None,
ctype_timestamp=None,
ext='.data', legacy_durable=False, commit=True):
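        # helper that writes a file straight into the diskfile's datadir,
        # bypassing the DiskFileWriter, so tests can construct arbitrary
        # on-disk states (legacy durable, uncommitted, etc.)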
mkdirs(df._datadir)
if timestamp is None:
timestamp = time()
timestamp = Timestamp(timestamp)
if not metadata:
metadata = {}
if 'X-Timestamp' not in metadata:
metadata['X-Timestamp'] = timestamp.internal
if 'ETag' not in metadata:
etag = md5()
etag.update(data)
metadata['ETag'] = etag.hexdigest()
if 'name' not in metadata:
metadata['name'] = '/a/c/o'
if 'Content-Length' not in metadata:
metadata['Content-Length'] = str(len(data))
filename = timestamp.internal
if ext == '.data' and df.policy.policy_type == EC_POLICY:
if legacy_durable:
filename = '%s#%s' % (timestamp.internal, df._frag_index)
if commit:
durable_file = os.path.join(
df._datadir, '%s.durable' % timestamp.internal)
with open(durable_file, 'wb') as f:
pass
elif commit:
filename = '%s#%s#d' % (timestamp.internal, df._frag_index)
else:
filename = '%s#%s' % (timestamp.internal, df._frag_index)
if ctype_timestamp:
metadata.update(
{'Content-Type-Timestamp':
Timestamp(ctype_timestamp).internal})
filename = encode_timestamps(timestamp,
Timestamp(ctype_timestamp),
explicit=True)
data_file = os.path.join(df._datadir, filename + ext)
with open(data_file, 'wb') as f:
f.write(data)
xattr.setxattr(f.fileno(), diskfile.METADATA_KEY,
pickle.dumps(metadata, diskfile.PICKLE_PROTOCOL))
def _simple_get_diskfile(self, partition='0', account='a', container='c',
obj='o', policy=None, frag_index=None, **kwargs):
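        # default to frag_index=2 for EC policies so callers need not pass
        # one explicitly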
policy = policy or POLICIES.default
df_mgr = self.df_router[policy]
if policy.policy_type == EC_POLICY and frag_index is None:
frag_index = 2
return df_mgr.get_diskfile(self.existing_device, partition,
account, container, obj,
policy=policy, frag_index=frag_index,
**kwargs)
def _create_test_file(self, data, timestamp=None, metadata=None,
account='a', container='c', obj='o', **kwargs):
if not isinstance(data, bytes):
raise ValueError('data must be bytes')
if metadata is None:
metadata = {}
metadata.setdefault('name', '/%s/%s/%s' % (account, container, obj))
df = self._simple_get_diskfile(account=account, container=container,
obj=obj, **kwargs)
if timestamp is None:
timestamp = time()
timestamp = Timestamp(timestamp)
# avoid getting O_TMPFILE warning in logs
if not utils.o_tmpfile_in_tmpdir_supported():
df.manager.use_linkat = False
if df.policy.policy_type == EC_POLICY:
data = encode_frag_archive_bodies(df.policy, data)[df._frag_index]
with df.create() as writer:
new_metadata = {
'ETag': md5(data).hexdigest(),
'X-Timestamp': timestamp.internal,
'Content-Length': len(data),
}
new_metadata.update(metadata)
writer.write(data)
writer.put(new_metadata)
writer.commit(timestamp)
df.open()
return df, data
def test_get_dev_path(self):
self.df_mgr.devices = '/srv'
device = 'sda1'
dev_path = os.path.join(self.df_mgr.devices, device)
mount_check = None
self.df_mgr.mount_check = True
with mock_check_drive(ismount=False):
self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
None)
with mock_check_drive(ismount=True):
self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
dev_path)
self.df_mgr.mount_check = False
with mock_check_drive(isdir=False):
self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
None)
with mock_check_drive(isdir=True):
self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
dev_path)
mount_check = True
with mock_check_drive(ismount=False):
self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
None)
with mock_check_drive(ismount=True):
self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
dev_path)
mount_check = False
self.assertEqual(self.df_mgr.get_dev_path(device, mount_check),
dev_path)
def test_open_not_exist(self):
df = self._simple_get_diskfile()
self.assertRaises(DiskFileNotExist, df.open)
def test_open_expired(self):
self.assertRaises(DiskFileExpired,
self._create_test_file,
b'1234567890', metadata={'X-Delete-At': '0'})
try:
self._create_test_file(b'1234567890', open_expired=True,
metadata={'X-Delete-At': '0',
'X-Object-Meta-Foo': 'bar'})
df = self._simple_get_diskfile(open_expired=True)
md = df.read_metadata()
self.assertEqual(md['X-Object-Meta-Foo'], 'bar')
except SwiftException as err:
self.fail("Unexpected swift exception raised: %r" % err)
def test_open_not_expired(self):
try:
self._create_test_file(
b'1234567890', metadata={'X-Delete-At': str(2 * int(time()))})
except SwiftException as err:
self.fail("Unexpected swift exception raised: %r" % err)
def test_get_metadata(self):
timestamp = self.ts().internal
df, df_data = self._create_test_file(b'1234567890',
timestamp=timestamp)
md = df.get_metadata()
self.assertEqual(md['X-Timestamp'], timestamp)
def test_read_metadata(self):
timestamp = self.ts().internal
self._create_test_file(b'1234567890', timestamp=timestamp)
df = self._simple_get_diskfile()
md = df.read_metadata()
self.assertEqual(md['X-Timestamp'], timestamp)
def test_read_metadata_no_xattr(self):
def mock_getxattr(*args, **kargs):
error_num = errno.ENOTSUP if hasattr(errno, 'ENOTSUP') else \
errno.EOPNOTSUPP
raise IOError(error_num, "Operation not supported")
with mock.patch('xattr.getxattr', mock_getxattr):
self.assertRaises(
DiskFileXattrNotSupported,
diskfile.read_metadata, 'n/a')
def test_get_metadata_not_opened(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
df.get_metadata()
def test_get_datafile_metadata(self):
ts_iter = make_timestamp_iter()
body = b'1234567890'
ts_data = next(ts_iter)
metadata = {'X-Object-Meta-Test': 'test1',
'X-Object-Sysmeta-Test': 'test1'}
df, df_data = self._create_test_file(body, timestamp=ts_data.internal,
metadata=metadata)
expected = df.get_metadata()
ts_meta = next(ts_iter)
df.write_metadata({'X-Timestamp': ts_meta.internal,
'X-Object-Meta-Test': 'changed',
'X-Object-Sysmeta-Test': 'ignored'})
df.open()
self.assertEqual(expected, df.get_datafile_metadata())
expected.update({'X-Timestamp': ts_meta.internal,
'X-Object-Meta-Test': 'changed'})
self.assertEqual(expected, df.get_metadata())
def test_get_datafile_metadata_not_opened(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
df.get_datafile_metadata()
def test_get_metafile_metadata(self):
ts_iter = make_timestamp_iter()
body = b'1234567890'
ts_data = next(ts_iter)
metadata = {'X-Object-Meta-Test': 'test1',
'X-Object-Sysmeta-Test': 'test1'}
df, df_data = self._create_test_file(body, timestamp=ts_data.internal,
metadata=metadata)
self.assertIsNone(df.get_metafile_metadata())
# now create a meta file
ts_meta = next(ts_iter)
df.write_metadata({'X-Timestamp': ts_meta.internal,
'X-Object-Meta-Test': 'changed'})
df.open()
expected = {'X-Timestamp': ts_meta.internal,
'X-Object-Meta-Test': 'changed'}
self.assertEqual(expected, df.get_metafile_metadata())
def test_get_metafile_metadata_not_opened(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
df.get_metafile_metadata()
def test_not_opened(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
with df:
pass
def test_disk_file_default_disallowed_metadata(self):
# build an object with some meta (at t0+1s)
orig_metadata = {'X-Object-Meta-Key1': 'Value1',
'X-Object-Transient-Sysmeta-KeyA': 'ValueA',
'Content-Type': 'text/garbage'}
df = self._get_open_disk_file(ts=self.ts().internal,
extra_metadata=orig_metadata)
with df.open():
if df.policy.policy_type == EC_POLICY:
expected = df.policy.pyeclib_driver.get_segment_info(
1024, df.policy.ec_segment_size)['fragment_size']
else:
expected = 1024
self.assertEqual(str(expected), df._metadata['Content-Length'])
# write some new metadata (fast POST, don't send orig meta, at t0+1)
df = self._simple_get_diskfile()
df.write_metadata({'X-Timestamp': self.ts().internal,
'X-Object-Transient-Sysmeta-KeyB': 'ValueB',
'X-Object-Meta-Key2': 'Value2'})
df = self._simple_get_diskfile()
with df.open():
# non-fast-post updateable keys are preserved
self.assertEqual('text/garbage', df._metadata['Content-Type'])
# original fast-post updateable keys are removed
self.assertNotIn('X-Object-Meta-Key1', df._metadata)
self.assertNotIn('X-Object-Transient-Sysmeta-KeyA', df._metadata)
# new fast-post updateable keys are added
self.assertEqual('Value2', df._metadata['X-Object-Meta-Key2'])
self.assertEqual('ValueB',
df._metadata['X-Object-Transient-Sysmeta-KeyB'])
def test_disk_file_preserves_sysmeta(self):
# build an object with some meta (at t0)
orig_metadata = {'X-Object-Sysmeta-Key1': 'Value1',
'Content-Type': 'text/garbage'}
df = self._get_open_disk_file(ts=self.ts().internal,
extra_metadata=orig_metadata)
with df.open():
if df.policy.policy_type == EC_POLICY:
expected = df.policy.pyeclib_driver.get_segment_info(
1024, df.policy.ec_segment_size)['fragment_size']
else:
expected = 1024
self.assertEqual(str(expected), df._metadata['Content-Length'])
# write some new metadata (fast POST, don't send orig meta, at t0+1s)
df = self._simple_get_diskfile()
df.write_metadata({'X-Timestamp': self.ts().internal,
'X-Object-Sysmeta-Key1': 'Value2',
'X-Object-Meta-Key3': 'Value3'})
df = self._simple_get_diskfile()
with df.open():
# non-fast-post updateable keys are preserved
self.assertEqual('text/garbage', df._metadata['Content-Type'])
# original sysmeta keys are preserved
self.assertEqual('Value1', df._metadata['X-Object-Sysmeta-Key1'])
def test_disk_file_preserves_slo(self):
# build an object with some meta (at t0)
orig_metadata = {'X-Static-Large-Object': 'True',
'Content-Type': 'text/garbage'}
df = self._get_open_disk_file(ts=self.ts().internal,
extra_metadata=orig_metadata)
# sanity test
with df.open():
self.assertEqual('True', df._metadata['X-Static-Large-Object'])
if df.policy.policy_type == EC_POLICY:
expected = df.policy.pyeclib_driver.get_segment_info(
1024, df.policy.ec_segment_size)['fragment_size']
else:
expected = 1024
self.assertEqual(str(expected), df._metadata['Content-Length'])
# write some new metadata (fast POST, don't send orig meta, at t0+1s)
df = self._simple_get_diskfile()
df.write_metadata({'X-Timestamp': self.ts().internal})
df = self._simple_get_diskfile()
with df.open():
# non-fast-post updateable keys are preserved
self.assertEqual('text/garbage', df._metadata['Content-Type'])
self.assertEqual('True', df._metadata['X-Static-Large-Object'])
def test_disk_file_reader_iter(self):
df, df_data = self._create_test_file(b'1234567890')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
self.assertEqual(b''.join(reader), df_data)
self.assertEqual(quarantine_msgs, [])
def test_disk_file_reader_iter_w_quarantine(self):
df, df_data = self._create_test_file(b'1234567890')
def raise_dfq(m):
raise DiskFileQuarantined(m)
reader = df.reader(_quarantine_hook=raise_dfq)
reader._obj_size += 1
self.assertRaises(DiskFileQuarantined, b''.join, reader)
def test_disk_file_reader_iter_w_io_error(self):
df, df_data = self._create_test_file(b'1234567890')
class FakeFp(object):
def __init__(self, buf):
self.pos = 0
self.buf = buf
def read(self, sz):
if not self.buf:
raise IOError(5, 'Input/output error')
chunk, self.buf = self.buf, b''
self.pos += len(chunk)
return chunk
def close(self):
pass
def tell(self):
return self.pos
def raise_dfq(m):
raise DiskFileQuarantined(m)
reader = df.reader(_quarantine_hook=raise_dfq)
reader._fp = FakeFp(b'1234')
self.assertRaises(DiskFileQuarantined, b''.join, reader)
def test_disk_file_app_iter_corners(self):
df, df_data = self._create_test_file(b'1234567890')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
self.assertEqual(b''.join(reader.app_iter_range(0, None)),
df_data)
self.assertEqual(quarantine_msgs, [])
df = self._simple_get_diskfile()
with df.open():
reader = df.reader()
self.assertEqual(b''.join(reader.app_iter_range(5, None)),
df_data[5:])
def test_disk_file_app_iter_range_w_none(self):
df, df_data = self._create_test_file(b'1234567890')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
self.assertEqual(b''.join(reader.app_iter_range(None, None)),
df_data)
self.assertEqual(quarantine_msgs, [])
def test_disk_file_app_iter_partial_closes(self):
df, df_data = self._create_test_file(b'1234567890')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
it = reader.app_iter_range(0, 5)
self.assertEqual(b''.join(it), df_data[:5])
self.assertEqual(quarantine_msgs, [])
self.assertTrue(reader._fp is None)
def test_disk_file_app_iter_ranges(self):
df, df_data = self._create_test_file(b'012345678911234567892123456789')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
it = reader.app_iter_ranges([(0, 10), (10, 20), (20, 30)],
'plain/text',
'\r\n--someheader\r\n', len(df_data))
value = b''.join(it)
self.assertIn(df_data[:10], value)
self.assertIn(df_data[10:20], value)
self.assertIn(df_data[20:30], value)
self.assertEqual(quarantine_msgs, [])
def test_disk_file_app_iter_ranges_w_quarantine(self):
df, df_data = self._create_test_file(b'012345678911234567892123456789')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
self.assertEqual(len(df_data), reader._obj_size) # sanity check
reader._obj_size += 1
it = reader.app_iter_ranges([(0, len(df_data))],
'plain/text',
'\r\n--someheader\r\n', len(df_data))
value = b''.join(it)
self.assertIn(df_data, value)
self.assertEqual(quarantine_msgs,
["Bytes read: %s, does not match metadata: %s" %
(len(df_data), len(df_data) + 1)])
def test_disk_file_app_iter_ranges_w_no_etag_quarantine(self):
df, df_data = self._create_test_file(b'012345678911234567892123456789')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
it = reader.app_iter_ranges([(0, 10)],
'plain/text',
'\r\n--someheader\r\n', len(df_data))
value = b''.join(it)
self.assertIn(df_data[:10], value)
self.assertEqual(quarantine_msgs, [])
def test_disk_file_app_iter_ranges_edges(self):
df, df_data = self._create_test_file(b'012345678911234567892123456789')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
it = reader.app_iter_ranges([(3, 10), (0, 2)], 'application/whatever',
'\r\n--someheader\r\n', len(df_data))
value = b''.join(it)
self.assertIn(df_data[3:10], value)
self.assertIn(df_data[:2], value)
self.assertEqual(quarantine_msgs, [])
def test_disk_file_large_app_iter_ranges(self):
        # This test case is to make sure that the disk file app_iter_ranges
        # method covers all of the paths being tested.
long_str = b'01234567890' * 65536
df, df_data = self._create_test_file(long_str)
target_strs = [df_data[3:10], df_data[0:65590]]
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
it = reader.app_iter_ranges([(3, 10), (0, 65590)], 'plain/text',
'5e816ff8b8b8e9a5d355497e5d9e0301',
len(df_data))
        # The produced string is actually missing the MIME headers, so we
        # need to add these headers to make it a real MIME message.
        # The body of the message is produced by the app_iter_ranges method
        # of the DiskFile reader.
header = b''.join([b'Content-Type: multipart/byteranges;',
b'boundary=',
b'5e816ff8b8b8e9a5d355497e5d9e0301\r\n'])
value = header + b''.join(it)
self.assertEqual(quarantine_msgs, [])
if six.PY2:
message = email.message_from_string(value)
else:
message = email.message_from_bytes(value)
parts = [p.get_payload(decode=True) for p in message.walk()][1:3]
self.assertEqual(parts, target_strs)
def test_disk_file_app_iter_ranges_empty(self):
        # This test case covers passing an empty value into app_iter_ranges:
        # when the ranges passed into the method are either an empty list or
        # None, the method yields an empty string.
df, df_data = self._create_test_file(b'012345678911234567892123456789')
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
it = reader.app_iter_ranges([], 'application/whatever',
'\r\n--someheader\r\n', len(df_data))
self.assertEqual(b''.join(it), b'')
df = self._simple_get_diskfile()
with df.open():
reader = df.reader()
it = reader.app_iter_ranges(None, 'app/something',
'\r\n--someheader\r\n', 150)
self.assertEqual(b''.join(it), b'')
self.assertEqual(quarantine_msgs, [])
def test_disk_file_mkstemp_creates_dir(self):
for policy in POLICIES:
tmpdir = os.path.join(self.testdir, self.existing_device,
diskfile.get_tmp_dir(policy))
os.rmdir(tmpdir)
df = self._simple_get_diskfile(policy=policy)
df.manager.use_linkat = False
with df.create():
self.assertTrue(os.path.exists(tmpdir))
def test_disk_file_writer(self):
df = self._simple_get_diskfile()
with df.create() as writer:
self.assertIsInstance(writer, diskfile.BaseDiskFileWriter)
# create automatically opens for us
self.assertIsNotNone(writer._fd)
# can't re-open since we're already open
with self.assertRaises(ValueError):
writer.open()
writer.write(b'asdf')
writer.close()
# can't write any more
with self.assertRaises(ValueError):
writer.write(b'asdf')
# can close again
writer.close()
def test_disk_file_concurrent_writes(self):
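        # two writers race on the same diskfile; the write with the later
        # timestamp (threadB) must win, even though threadA commits last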
def threadA(df, events, errors):
try:
ts = self.ts()
with df.create() as writer:
writer.write(b'dataA')
writer.put({
'X-Timestamp': ts.internal,
'Content-Length': 5,
})
events[0].set()
events[1].wait()
writer.commit(ts)
except Exception as e:
errors.append(e)
raise
def threadB(df, events, errors):
try:
events[0].wait()
ts = self.ts()
with df.create() as writer:
writer.write(b'dataB')
writer.put({
'X-Timestamp': ts.internal,
'Content-Length': 5,
})
writer.commit(ts)
events[1].set()
except Exception as e:
errors.append(e)
raise
df = self._simple_get_diskfile()
events = [threading.Event(), threading.Event()]
errors = []
threads = [threading.Thread(target=tgt, args=(df, events, errors))
for tgt in (threadA, threadB)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertFalse(errors)
with df.open(), open(df._data_file, 'rb') as fp:
self.assertEqual(b'dataB', fp.read())
def test_disk_file_concurrent_marked_durable(self):
ts = self.ts()
def threadA(df, events, errors):
try:
with df.create() as writer:
writer.write(b'dataA')
writer.put({
'X-Timestamp': ts.internal,
'Content-Length': 5,
})
events[0].set()
events[1].wait()
writer.commit(ts)
except Exception as e:
errors.append(e)
raise
def threadB(df, events, errors):
try:
events[0].wait()
# Mark it durable just like in ssync_receiver
with df.create() as writer:
writer.commit(ts)
events[1].set()
except Exception as e:
errors.append(e)
raise
df = self._simple_get_diskfile()
events = [threading.Event(), threading.Event()]
errors = []
threads = [threading.Thread(target=tgt, args=(df, events, errors))
for tgt in (threadA, threadB)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertFalse(errors)
with df.open(), open(df._data_file, 'rb') as fp:
if df.policy.policy_type == EC_POLICY:
# Confirm that it really *was* marked durable
self.assertTrue(df._data_file.endswith('#d.data'))
self.assertEqual(b'dataA', fp.read())
def test_disk_file_concurrent_delete(self):
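        # the tombstone written at the later timestamp must win over the
        # concurrent write, so opening the diskfile afterwards raises
        # DiskFileDeleted even though the write's commit happens last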
def threadA(df, events, errors):
try:
ts = self.ts()
with df.create() as writer:
writer.write(b'dataA')
writer.put({'X-Timestamp': ts.internal})
events[0].set()
events[1].wait()
writer.commit(ts)
except Exception as e:
errors.append(e)
raise
def threadB(df, events, errors):
try:
events[0].wait()
df.delete(self.ts())
events[1].set()
except Exception as e:
errors.append(e)
raise
df = self._simple_get_diskfile()
events = [threading.Event(), threading.Event()]
errors = []
threads = [threading.Thread(target=tgt, args=(df, events, errors))
for tgt in (threadA, threadB)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertFalse(errors)
self.assertRaises(DiskFileDeleted, df.open)
def _get_open_disk_file(self, invalid_type=None, obj_name='o', fsize=1024,
csize=8, mark_deleted=False, prealloc=False,
ts=None, mount_check=False, extra_metadata=None,
policy=None, frag_index=None, data=None,
commit=True):
        """returns a DiskFile"""
policy = policy or POLICIES.legacy
df = self._simple_get_diskfile(obj=obj_name, policy=policy,
frag_index=frag_index)
data = data or b'0' * fsize
if not isinstance(data, bytes):
raise ValueError('data must be bytes')
if policy.policy_type == EC_POLICY:
archives = encode_frag_archive_bodies(policy, data)
try:
data = archives[df._frag_index]
except IndexError:
data = archives[0]
if ts:
timestamp = Timestamp(ts)
else:
timestamp = Timestamp.now()
if prealloc:
prealloc_size = fsize
else:
prealloc_size = None
with df.create(size=prealloc_size) as writer:
writer.write(data)
upload_size, etag = writer.chunks_finished()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp.internal,
'Content-Length': str(upload_size),
}
metadata.update(extra_metadata or {})
writer.put(metadata)
if invalid_type == 'ETag':
etag = md5()
                etag.update(b'1' + b'0' * (fsize - 1))
etag = etag.hexdigest()
metadata['ETag'] = etag
diskfile.write_metadata(writer._fd, metadata)
elif invalid_type == 'Content-Length':
metadata['Content-Length'] = fsize - 1
diskfile.write_metadata(writer._fd, metadata)
elif invalid_type == 'Bad-Content-Length':
metadata['Content-Length'] = 'zero'
diskfile.write_metadata(writer._fd, metadata)
elif invalid_type == 'Missing-Content-Length':
del metadata['Content-Length']
diskfile.write_metadata(writer._fd, metadata)
elif invalid_type == 'Bad-X-Delete-At':
metadata['X-Delete-At'] = 'bad integer'
diskfile.write_metadata(writer._fd, metadata)
if commit:
writer.commit(timestamp)
if mark_deleted:
df.delete(timestamp)
data_files = [os.path.join(df._datadir, fname)
for fname in sorted(os.listdir(df._datadir),
reverse=True)
if fname.endswith('.data')]
if invalid_type == 'Corrupt-Xattrs':
# We have to go below read_metadata/write_metadata to get proper
# corruption.
meta_xattr = xattr.getxattr(data_files[0], "user.swift.metadata")
wrong_byte = b'X' if meta_xattr[:1] != b'X' else b'Y'
xattr.setxattr(data_files[0], "user.swift.metadata",
wrong_byte + meta_xattr[1:])
elif invalid_type == 'Subtly-Corrupt-Xattrs':
# We have to go below read_metadata/write_metadata to get proper
# corruption.
meta_xattr = xattr.getxattr(data_files[0], "user.swift.metadata")
wrong_checksum = md5(meta_xattr + b"some extra stuff").hexdigest()
xattr.setxattr(data_files[0], "user.swift.metadata_checksum",
wrong_checksum.encode())
elif invalid_type == 'Truncated-Xattrs':
meta_xattr = xattr.getxattr(data_files[0], "user.swift.metadata")
xattr.setxattr(data_files[0], "user.swift.metadata",
meta_xattr[:-1])
elif invalid_type == 'Missing-Name':
md = diskfile.read_metadata(data_files[0])
del md['name']
diskfile.write_metadata(data_files[0], md)
elif invalid_type == 'Bad-Name':
md = diskfile.read_metadata(data_files[0])
md['name'] = md['name'] + 'garbage'
diskfile.write_metadata(data_files[0], md)
self.conf['disk_chunk_size'] = csize
self.conf['mount_check'] = mount_check
self.df_mgr = self.mgr_cls(self.conf, self.logger)
self.df_router = diskfile.DiskFileRouter(self.conf, self.logger)
# actual on disk frag_index may have been set by metadata
frag_index = metadata.get('X-Object-Sysmeta-Ec-Frag-Index',
frag_index)
df = self._simple_get_diskfile(obj=obj_name, policy=policy,
frag_index=frag_index)
df.open()
if invalid_type == 'Zero-Byte':
fp = open(df._data_file, 'w')
fp.close()
df.unit_test_len = fsize
return df
def test_keep_cache(self):
df = self._get_open_disk_file(fsize=65)
with mock.patch("swift.obj.diskfile.drop_buffer_cache") as foo:
for _ in df.reader():
pass
self.assertTrue(foo.called)
df = self._get_open_disk_file(fsize=65)
with mock.patch("swift.obj.diskfile.drop_buffer_cache") as bar:
for _ in df.reader(keep_cache=False):
pass
self.assertTrue(bar.called)
df = self._get_open_disk_file(fsize=65)
with mock.patch("swift.obj.diskfile.drop_buffer_cache") as boo:
for _ in df.reader(keep_cache=True):
pass
self.assertFalse(boo.called)
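        # even with keep_cache=True, a file larger than the manager's
        # keep_cache_size should still have its buffer cache dropped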
df = self._get_open_disk_file(fsize=50 * 1024, csize=256)
with mock.patch("swift.obj.diskfile.drop_buffer_cache") as goo:
for _ in df.reader(keep_cache=True):
pass
self.assertTrue(goo.called)
def test_quarantine_valids(self):
def verify(*args, **kwargs):
try:
df = self._get_open_disk_file(**kwargs)
reader = df.reader()
for chunk in reader:
pass
except DiskFileQuarantined:
self.fail(
"Unexpected quarantining occurred: args=%r, kwargs=%r" % (
args, kwargs))
else:
pass
verify(obj_name='1')
verify(obj_name='2', csize=1)
verify(obj_name='3', csize=100000)
def run_quarantine_invalids(self, invalid_type):
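        # some corruptions are detected when the diskfile is opened (raising
        # DiskFileQuarantined, or DiskFileCollision for a bad name), while
        # the rest are only caught as the body is read, in which case the
        # quarantine hook records a message instead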
open_exc = invalid_type in ('Content-Length', 'Bad-Content-Length',
'Subtly-Corrupt-Xattrs',
'Corrupt-Xattrs', 'Truncated-Xattrs',
'Missing-Name', 'Bad-X-Delete-At')
open_collision = invalid_type == 'Bad-Name'
def verify(*args, **kwargs):
quarantine_msgs = []
try:
df = self._get_open_disk_file(**kwargs)
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
except DiskFileQuarantined as err:
if not open_exc:
self.fail(
"Unexpected DiskFileQuarantine raised: %r" % err)
return
except DiskFileCollision as err:
if not open_collision:
self.fail(
"Unexpected DiskFileCollision raised: %r" % err)
return
else:
if open_exc:
self.fail("Expected DiskFileQuarantine exception")
try:
for chunk in reader:
pass
except DiskFileQuarantined as err:
self.fail("Unexpected DiskFileQuarantine raised: :%r" % err)
else:
if not open_exc:
self.assertEqual(1, len(quarantine_msgs))
verify(invalid_type=invalid_type, obj_name='1')
verify(invalid_type=invalid_type, obj_name='2', csize=1)
verify(invalid_type=invalid_type, obj_name='3', csize=100000)
verify(invalid_type=invalid_type, obj_name='4')
def verify_air(params, start=0, adjustment=0):
"""verify (a)pp (i)ter (r)ange"""
try:
df = self._get_open_disk_file(**params)
reader = df.reader()
except DiskFileQuarantined as err:
if not open_exc:
self.fail(
"Unexpected DiskFileQuarantine raised: %r" % err)
return
except DiskFileCollision as err:
if not open_collision:
self.fail(
"Unexpected DiskFileCollision raised: %r" % err)
return
else:
if open_exc:
self.fail("Expected DiskFileQuarantine exception")
try:
for chunk in reader.app_iter_range(
start,
df.unit_test_len + adjustment):
pass
except DiskFileQuarantined as err:
self.fail("Unexpected DiskFileQuarantine raised: :%r" % err)
verify_air(dict(invalid_type=invalid_type, obj_name='5'))
verify_air(dict(invalid_type=invalid_type, obj_name='6'), 0, 100)
verify_air(dict(invalid_type=invalid_type, obj_name='7'), 1)
verify_air(dict(invalid_type=invalid_type, obj_name='8'), 0, -1)
verify_air(dict(invalid_type=invalid_type, obj_name='8'), 1, 1)
def test_quarantine_corrupt_xattrs(self):
self.run_quarantine_invalids('Corrupt-Xattrs')
def test_quarantine_subtly_corrupt_xattrs(self):
# xattrs that unpickle without error, but whose checksum does not
# match
self.run_quarantine_invalids('Subtly-Corrupt-Xattrs')
def test_quarantine_truncated_xattrs(self):
self.run_quarantine_invalids('Truncated-Xattrs')
def test_quarantine_invalid_etag(self):
self.run_quarantine_invalids('ETag')
def test_quarantine_invalid_missing_name(self):
self.run_quarantine_invalids('Missing-Name')
def test_quarantine_invalid_bad_name(self):
self.run_quarantine_invalids('Bad-Name')
def test_quarantine_invalid_bad_x_delete_at(self):
self.run_quarantine_invalids('Bad-X-Delete-At')
def test_quarantine_invalid_content_length(self):
self.run_quarantine_invalids('Content-Length')
def test_quarantine_invalid_content_length_bad(self):
self.run_quarantine_invalids('Bad-Content-Length')
def test_quarantine_invalid_zero_byte(self):
self.run_quarantine_invalids('Zero-Byte')
def test_quarantine_deleted_files(self):
try:
self._get_open_disk_file(invalid_type='Content-Length')
except DiskFileQuarantined:
pass
else:
self.fail("Expected DiskFileQuarantined exception")
try:
self._get_open_disk_file(invalid_type='Content-Length',
mark_deleted=True)
except DiskFileQuarantined as err:
self.fail("Unexpected DiskFileQuarantined exception"
" encountered: %r" % err)
except DiskFileNotExist:
pass
else:
self.fail("Expected DiskFileNotExist exception")
try:
self._get_open_disk_file(invalid_type='Content-Length',
mark_deleted=True)
except DiskFileNotExist:
pass
else:
self.fail("Expected DiskFileNotExist exception")
def test_quarantine_missing_content_length(self):
self.assertRaises(
DiskFileQuarantined,
self._get_open_disk_file,
invalid_type='Missing-Content-Length')
def test_quarantine_bad_content_length(self):
self.assertRaises(
DiskFileQuarantined,
self._get_open_disk_file,
invalid_type='Bad-Content-Length')
def test_quarantine_fstat_oserror(self):
with mock.patch('os.fstat', side_effect=OSError()):
self.assertRaises(
DiskFileQuarantined,
self._get_open_disk_file)
def test_quarantine_ioerror_enodata(self):
df = self._get_open_disk_file()
def my_open(filename, mode, *args, **kwargs):
if mode == 'rb':
raise IOError(errno.ENODATA, '-ENODATA fool!')
return open(filename, mode, *args, **kwargs)
with mock.patch('swift.obj.diskfile.open', my_open):
with self.assertRaises(DiskFileQuarantined) as err:
df.open()
self.assertEqual(
'Failed to open %s: [Errno 61] -ENODATA fool!' % df._data_file,
str(err.exception))
def test_quarantine_hashdir_not_a_directory(self):
df, df_data = self._create_test_file(b'1234567890', account="abc",
container='123', obj='xyz')
hashdir = df._datadir
rmtree(hashdir)
with open(hashdir, 'w'):
pass
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
'xyz', policy=POLICIES.legacy)
self.assertRaises(DiskFileQuarantined, df.open)
# make sure the right thing got quarantined; the suffix dir should not
# have moved, as that could have many objects in it
self.assertFalse(os.path.exists(hashdir))
self.assertTrue(os.path.exists(os.path.dirname(hashdir)))
def test_quarantine_hashdir_not_listable(self):
df, df_data = self._create_test_file(b'1234567890', account="abc",
container='123', obj='xyz')
hashdir = df._datadir
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
'xyz', policy=POLICIES.legacy)
with mock.patch('os.listdir',
side_effect=OSError(errno.ENODATA, 'nope')):
self.assertRaises(DiskFileQuarantined, df.open)
# make sure the right thing got quarantined; the suffix dir should not
# have moved, as that could have many objects in it
self.assertFalse(os.path.exists(hashdir))
self.assertTrue(os.path.exists(os.path.dirname(hashdir)))
def test_create_prealloc(self):
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
'xyz', policy=POLICIES.legacy)
with mock.patch("swift.obj.diskfile.fallocate") as fa:
with df.create(size=200) as writer:
used_fd = writer._fd
fa.assert_called_with(used_fd, 200)
def test_create_prealloc_oserror(self):
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
'xyz', policy=POLICIES.legacy)
for e in (errno.ENOSPC, errno.EDQUOT):
with mock.patch("swift.obj.diskfile.fallocate",
mock.MagicMock(side_effect=OSError(
e, os.strerror(e)))):
try:
with df.create(size=200):
pass
except DiskFileNoSpace:
pass
else:
self.fail("Expected exception DiskFileNoSpace")
# Other OSErrors must not be raised as DiskFileNoSpace
with mock.patch("swift.obj.diskfile.fallocate",
mock.MagicMock(side_effect=OSError(
errno.EACCES, os.strerror(errno.EACCES)))):
try:
with df.create(size=200):
pass
except OSError:
pass
else:
self.fail("Expected exception OSError")
def test_create_mkstemp_no_space(self):
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
'xyz', policy=POLICIES.legacy)
df.manager.use_linkat = False
for e in (errno.ENOSPC, errno.EDQUOT):
with mock.patch("swift.obj.diskfile.mkstemp",
mock.MagicMock(side_effect=OSError(
e, os.strerror(e)))):
with self.assertRaises(DiskFileNoSpace):
with df.create(size=200):
pass
# Other OSErrors must not be raised as DiskFileNoSpace
with mock.patch("swift.obj.diskfile.mkstemp",
mock.MagicMock(side_effect=OSError(
errno.EACCES, os.strerror(errno.EACCES)))):
with self.assertRaises(OSError) as raised:
with df.create(size=200):
pass
self.assertEqual(raised.exception.errno, errno.EACCES)
def test_create_close_oserror(self):
# This is a horrible hack so you can run this test in isolation.
# Some of the ctypes machinery calls os.close(), and that runs afoul
# of our mock.
with mock.patch.object(utils, '_sys_fallocate', None):
utils.disable_fallocate()
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc',
'123', 'xyz', policy=POLICIES.legacy)
with mock.patch("swift.obj.diskfile.os.close",
mock.MagicMock(side_effect=OSError(
errno.EACCES, os.strerror(errno.EACCES)))):
with df.create(size=200):
pass
def test_write_metadata(self):
df, df_data = self._create_test_file(b'1234567890')
file_count = len(os.listdir(df._datadir))
timestamp = Timestamp.now().internal
metadata = {'X-Timestamp': timestamp, 'X-Object-Meta-test': 'data'}
df.write_metadata(metadata)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), file_count + 1)
exp_name = '%s.meta' % timestamp
self.assertIn(exp_name, set(dl))
def test_write_metadata_with_content_type(self):
# if metadata has content-type then its time should be in file name
df, df_data = self._create_test_file(b'1234567890')
file_count = len(os.listdir(df._datadir))
timestamp = Timestamp.now()
metadata = {'X-Timestamp': timestamp.internal,
'X-Object-Meta-test': 'data',
'Content-Type': 'foo',
'Content-Type-Timestamp': timestamp.internal}
df.write_metadata(metadata)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), file_count + 1)
exp_name = '%s+0.meta' % timestamp.internal
self.assertTrue(exp_name in set(dl),
'Expected file %s not found in %s' % (exp_name, dl))
def test_write_metadata_with_older_content_type(self):
# if metadata has content-type then its time should be in file name
ts_iter = make_timestamp_iter()
df, df_data = self._create_test_file(b'1234567890',
timestamp=next(ts_iter))
file_count = len(os.listdir(df._datadir))
timestamp = next(ts_iter)
timestamp2 = next(ts_iter)
metadata = {'X-Timestamp': timestamp2.internal,
'X-Object-Meta-test': 'data',
'Content-Type': 'foo',
'Content-Type-Timestamp': timestamp.internal}
df.write_metadata(metadata)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), file_count + 1, dl)
exp_name = '%s-%x.meta' % (timestamp2.internal,
timestamp2.raw - timestamp.raw)
self.assertTrue(exp_name in set(dl),
'Expected file %s not found in %s' % (exp_name, dl))
def test_write_metadata_with_content_type_removes_same_time_meta(self):
# a meta file without content-type should be cleaned up in favour of
# a meta file at same time with content-type
ts_iter = make_timestamp_iter()
df, df_data = self._create_test_file(b'1234567890',
timestamp=next(ts_iter))
file_count = len(os.listdir(df._datadir))
timestamp = next(ts_iter)
timestamp2 = next(ts_iter)
metadata = {'X-Timestamp': timestamp2.internal,
'X-Object-Meta-test': 'data'}
df.write_metadata(metadata)
metadata = {'X-Timestamp': timestamp2.internal,
'X-Object-Meta-test': 'data',
'Content-Type': 'foo',
'Content-Type-Timestamp': timestamp.internal}
df.write_metadata(metadata)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), file_count + 1, dl)
exp_name = '%s-%x.meta' % (timestamp2.internal,
timestamp2.raw - timestamp.raw)
self.assertTrue(exp_name in set(dl),
'Expected file %s not found in %s' % (exp_name, dl))
def test_write_metadata_with_content_type_removes_multiple_metas(self):
# a combination of a meta file without content-type and an older meta
# file with content-type should be cleaned up in favour of a meta file
# at newer time with content-type
ts_iter = make_timestamp_iter()
df, df_data = self._create_test_file(b'1234567890',
timestamp=next(ts_iter))
file_count = len(os.listdir(df._datadir))
timestamp = next(ts_iter)
timestamp2 = next(ts_iter)
metadata = {'X-Timestamp': timestamp2.internal,
'X-Object-Meta-test': 'data'}
df.write_metadata(metadata)
metadata = {'X-Timestamp': timestamp.internal,
'X-Object-Meta-test': 'data',
'Content-Type': 'foo',
'Content-Type-Timestamp': timestamp.internal}
df.write_metadata(metadata)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), file_count + 2, dl)
metadata = {'X-Timestamp': timestamp2.internal,
'X-Object-Meta-test': 'data',
'Content-Type': 'foo',
'Content-Type-Timestamp': timestamp.internal}
df.write_metadata(metadata)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), file_count + 1, dl)
exp_name = '%s-%x.meta' % (timestamp2.internal,
timestamp2.raw - timestamp.raw)
self.assertTrue(exp_name in set(dl),
'Expected file %s not found in %s' % (exp_name, dl))
def test_write_metadata_no_xattr(self):
timestamp = Timestamp.now().internal
metadata = {'X-Timestamp': timestamp, 'X-Object-Meta-test': 'data'}
def mock_setxattr(*args, **kargs):
error_num = errno.ENOTSUP if hasattr(errno, 'ENOTSUP') else \
errno.EOPNOTSUPP
raise IOError(error_num, "Operation not supported")
with mock.patch('xattr.setxattr', mock_setxattr):
self.assertRaises(
DiskFileXattrNotSupported,
diskfile.write_metadata, 'n/a', metadata)
def test_write_metadata_disk_full(self):
timestamp = Timestamp.now().internal
metadata = {'X-Timestamp': timestamp, 'X-Object-Meta-test': 'data'}
def mock_setxattr_ENOSPC(*args, **kargs):
raise IOError(errno.ENOSPC, "No space left on device")
def mock_setxattr_EDQUOT(*args, **kargs):
raise IOError(errno.EDQUOT, "Exceeded quota")
with mock.patch('xattr.setxattr', mock_setxattr_ENOSPC):
self.assertRaises(
DiskFileNoSpace,
diskfile.write_metadata, 'n/a', metadata)
with mock.patch('xattr.setxattr', mock_setxattr_EDQUOT):
self.assertRaises(
DiskFileNoSpace,
diskfile.write_metadata, 'n/a', metadata)
def _create_diskfile_dir(self, timestamp, policy, legacy_durable=False,
partition=0, next_part_power=None,
expect_error=False):
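        # helper: write a diskfile for the given policy/partition (optionally
        # as a legacy durable fileset and/or with next_part_power set) and
        # return its datadir; with expect_error=True the write itself is
        # expected to raise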
timestamp = Timestamp(timestamp)
df = self._simple_get_diskfile(account='a', container='c',
obj='o_%s' % policy,
policy=policy,
partition=partition,
next_part_power=next_part_power)
frag_index = None
if policy.policy_type == EC_POLICY:
frag_index = df._frag_index or 7
if expect_error:
with self.assertRaises(Exception):
write_diskfile(df, timestamp, frag_index=frag_index,
legacy_durable=legacy_durable)
else:
write_diskfile(df, timestamp, frag_index=frag_index,
legacy_durable=legacy_durable)
return df._datadir
def test_commit(self):
for policy in POLICIES:
timestamp = Timestamp.now()
df = self._simple_get_diskfile(account='a', container='c',
obj='o_%s' % policy,
policy=policy)
write_diskfile(df, timestamp, frag_index=2)
dl = os.listdir(df._datadir)
expected = [_make_datafilename(
timestamp, policy, frag_index=2, durable=True)]
self.assertEqual(len(dl), len(expected),
'Unexpected dir listing %s' % dl)
self.assertEqual(expected, dl)
if policy.policy_type == EC_POLICY:
self.assertEqual(2, df._frag_index)
def _do_test_write_cleanup(self, policy, legacy_durable=False):
# create first fileset as starting state
timestamp_1 = Timestamp.now()
datadir_1 = self._create_diskfile_dir(
timestamp_1, policy, legacy_durable)
# second write should clean up first fileset
timestamp_2 = Timestamp(time() + 1)
datadir_2 = self._create_diskfile_dir(timestamp_2, policy)
# sanity check
self.assertEqual(datadir_1, datadir_2)
dl = os.listdir(datadir_2)
expected = [_make_datafilename(
timestamp_2, policy, frag_index=2, durable=True)]
self.assertEqual(len(dl), len(expected),
'Unexpected dir listing %s' % dl)
self.assertEqual(expected, dl)
def test_write_cleanup(self):
for policy in POLICIES:
self._do_test_write_cleanup(policy)
def test_write_cleanup_legacy_durable(self):
for policy in POLICIES:
self._do_test_write_cleanup(policy, legacy_durable=True)
@mock.patch("swift.obj.diskfile.BaseDiskFileManager.cleanup_ondisk_files")
def test_write_cleanup_part_power_increase(self, mock_cleanup):
# Without next_part_power set we expect only one cleanup per DiskFile
# and no linking
for policy in POLICIES:
timestamp = Timestamp(time()).internal
df_dir = self._create_diskfile_dir(timestamp, policy)
self.assertEqual(1, mock_cleanup.call_count)
mock_cleanup.assert_called_once_with(df_dir)
mock_cleanup.reset_mock()
# With next_part_power set to part_power + 1 we expect two cleanups per
# DiskFile: first cleanup the current directory, but also cleanup the
# future directory where hardlinks are created
for policy in POLICIES:
timestamp = Timestamp(time()).internal
df_dir = self._create_diskfile_dir(
timestamp, policy, next_part_power=11)
self.assertEqual(2, mock_cleanup.call_count)
mock_cleanup.assert_any_call(df_dir)
# Make sure the translated path is also cleaned up
expected_dir = utils.replace_partition_in_path(
self.conf['devices'], df_dir, 11)
mock_cleanup.assert_any_call(expected_dir)
mock_cleanup.reset_mock()
# With next_part_power set to part_power we expect two cleanups per
# DiskFile: first cleanup the current directory, but also cleanup the
# previous old directory
for policy in POLICIES:
hash_path = utils.hash_path('a', 'c', 'o_%s' % policy)
partition = utils.get_partition_for_hash(hash_path, 10)
timestamp = Timestamp(time()).internal
df_dir = self._create_diskfile_dir(
timestamp, policy, partition=partition, next_part_power=10)
self.assertEqual(2, mock_cleanup.call_count)
mock_cleanup.assert_any_call(df_dir)
# Make sure the path using the old part power is also cleaned up
expected_dir = utils.replace_partition_in_path(
self.conf['devices'], df_dir, 9)
mock_cleanup.assert_any_call(expected_dir)
mock_cleanup.reset_mock()
@mock.patch.object(diskfile.BaseDiskFileManager, 'cleanup_ondisk_files',
side_effect=Exception)
def test_killed_before_cleanup(self, mock_cleanup):
for policy in POLICIES:
timestamp = Timestamp(time()).internal
hash_path = utils.hash_path('a', 'c', 'o_%s' % policy)
partition = utils.get_partition_for_hash(hash_path, 10)
df_dir = self._create_diskfile_dir(timestamp, policy,
partition=partition,
next_part_power=11,
expect_error=True)
expected_dir = utils.replace_partition_in_path(
self.conf['devices'], df_dir, 11)
self.assertEqual(os.listdir(df_dir), os.listdir(expected_dir))
def test_commit_fsync(self):
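        # put() should fsync the object file exactly once and commit() must
        # not issue a second fsync; for EC policies the fsync argument is
        # additionally checked to be a raw file descriptor (an int)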
for policy in POLICIES:
df = self._simple_get_diskfile(account='a', container='c',
obj='o', policy=policy)
timestamp = Timestamp.now()
with df.create() as writer:
metadata = {
'ETag': 'bogus_etag',
'X-Timestamp': timestamp.internal,
'Content-Length': '0',
}
with mock.patch('swift.obj.diskfile.fsync') as mock_fsync:
writer.put(metadata)
self.assertEqual(1, mock_fsync.call_count)
writer.commit(timestamp)
self.assertEqual(1, mock_fsync.call_count)
if policy.policy_type == EC_POLICY:
self.assertIsInstance(mock_fsync.call_args[0][0], int)
def test_commit_ignores_cleanup_ondisk_files_error(self):
for policy in POLICIES:
# Check OSError from cleanup_ondisk_files is caught and ignored
mock_cleanup = mock.MagicMock(side_effect=OSError)
df = self._simple_get_diskfile(account='a', container='c',
obj='o_error', policy=policy)
timestamp = Timestamp.now()
with df.create() as writer:
metadata = {
'ETag': 'bogus_etag',
'X-Timestamp': timestamp.internal,
'Content-Length': '0',
}
writer.put(metadata)
with mock.patch(self._manager_mock(
'cleanup_ondisk_files', df), mock_cleanup):
writer.commit(timestamp)
expected = {
EC_POLICY: 1,
REPL_POLICY: 0,
}[policy.policy_type]
self.assertEqual(expected, mock_cleanup.call_count)
if expected:
self.assertIn(
'Problem cleaning up',
df.manager.logger.get_lines_for_level('error')[0])
expected = [_make_datafilename(
timestamp, policy, frag_index=2, durable=True)]
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), len(expected),
'Unexpected dir listing %s' % dl)
self.assertEqual(expected, dl)
def test_number_calls_to_cleanup_ondisk_files_during_create(self):
# Check how many calls are made to cleanup_ondisk_files, and when,
# during put(), commit() sequence
for policy in POLICIES:
expected = {
EC_POLICY: (0, 1),
REPL_POLICY: (1, 0),
}[policy.policy_type]
df = self._simple_get_diskfile(account='a', container='c',
obj='o_error', policy=policy)
timestamp = Timestamp.now()
with df.create() as writer:
metadata = {
'ETag': 'bogus_etag',
'X-Timestamp': timestamp.internal,
'Content-Length': '0',
}
with mock.patch(self._manager_mock(
'cleanup_ondisk_files', df)) as mock_cleanup:
writer.put(metadata)
self.assertEqual(expected[0], mock_cleanup.call_count)
with mock.patch(self._manager_mock(
'cleanup_ondisk_files', df)) as mock_cleanup:
writer.commit(timestamp)
self.assertEqual(expected[1], mock_cleanup.call_count)
def test_number_calls_to_cleanup_ondisk_files_during_delete(self):
# Check how many calls are made to cleanup_ondisk_files, and when,
# for delete() and necessary prerequisite steps
for policy in POLICIES:
expected = {
EC_POLICY: (0, 1, 1),
REPL_POLICY: (1, 0, 1),
}[policy.policy_type]
df = self._simple_get_diskfile(account='a', container='c',
obj='o_error', policy=policy)
timestamp = Timestamp.now()
with df.create() as writer:
metadata = {
'ETag': 'bogus_etag',
'X-Timestamp': timestamp.internal,
'Content-Length': '0',
}
with mock.patch(self._manager_mock(
'cleanup_ondisk_files', df)) as mock_cleanup:
writer.put(metadata)
self.assertEqual(expected[0], mock_cleanup.call_count)
with mock.patch(self._manager_mock(
'cleanup_ondisk_files', df)) as mock_cleanup:
writer.commit(timestamp)
self.assertEqual(expected[1], mock_cleanup.call_count)
with mock.patch(self._manager_mock(
'cleanup_ondisk_files', df)) as mock_cleanup:
timestamp = Timestamp.now()
df.delete(timestamp)
self.assertEqual(expected[2], mock_cleanup.call_count)
def test_delete(self):
for policy in POLICIES:
if policy.policy_type == EC_POLICY:
metadata = {'X-Object-Sysmeta-Ec-Frag-Index': '1'}
fi = 1
else:
metadata = {}
fi = None
df = self._get_open_disk_file(policy=policy, frag_index=fi,
extra_metadata=metadata)
ts = Timestamp.now()
df.delete(ts)
exp_name = '%s.ts' % ts.internal
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), 1)
self.assertIn(exp_name, set(dl))
# cleanup before next policy
os.unlink(os.path.join(df._datadir, exp_name))
def test_open_deleted(self):
df = self._get_open_disk_file()
ts = time()
df.delete(ts)
exp_name = '%s.ts' % str(Timestamp(ts).internal)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), 1)
self.assertIn(exp_name, set(dl))
df = self._simple_get_diskfile()
self.assertRaises(DiskFileDeleted, df.open)
def test_open_deleted_with_corrupt_tombstone(self):
df = self._get_open_disk_file()
ts = time()
df.delete(ts)
exp_name = '%s.ts' % str(Timestamp(ts).internal)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), 1)
self.assertIn(exp_name, set(dl))
# it's pickle-format, so removing the last byte is sufficient to
# corrupt it
ts_fullpath = os.path.join(df._datadir, exp_name)
self.assertTrue(os.path.exists(ts_fullpath)) # sanity check
meta_xattr = xattr.getxattr(ts_fullpath, "user.swift.metadata")
xattr.setxattr(ts_fullpath, "user.swift.metadata", meta_xattr[:-1])
df = self._simple_get_diskfile()
self.assertRaises(DiskFileNotExist, df.open)
self.assertFalse(os.path.exists(ts_fullpath))
def test_from_audit_location(self):
df, df_data = self._create_test_file(
b'blah blah',
account='three', container='blind', obj='mice')
hashdir = df._datadir
df = self.df_mgr.get_diskfile_from_audit_location(
diskfile.AuditLocation(hashdir, self.existing_device, '0',
policy=POLICIES.default))
df.open()
self.assertEqual(df._name, '/three/blind/mice')
def test_from_audit_location_with_mismatched_hash(self):
df, df_data = self._create_test_file(
b'blah blah',
account='this', container='is', obj='right')
hashdir = df._datadir
datafilename = [f for f in os.listdir(hashdir)
if f.endswith('.data')][0]
datafile = os.path.join(hashdir, datafilename)
meta = diskfile.read_metadata(datafile)
meta['name'] = '/this/is/wrong'
diskfile.write_metadata(datafile, meta)
df = self.df_mgr.get_diskfile_from_audit_location(
diskfile.AuditLocation(hashdir, self.existing_device, '0',
policy=POLICIES.default))
self.assertRaises(DiskFileQuarantined, df.open)
def test_close_error(self):
def mock_handle_close_quarantine():
raise Exception("Bad")
df = self._get_open_disk_file(fsize=1024 * 1024 * 2, csize=1024)
reader = df.reader()
reader._handle_close_quarantine = mock_handle_close_quarantine
for chunk in reader:
pass
# close is called at the end of the iterator
self.assertIsNone(reader._fp)
error_lines = df._logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1)
self.assertIn('close failure', error_lines[0])
self.assertIn('Bad', error_lines[0])
def test_mount_checking(self):
def _mock_cm(*args, **kwargs):
return False
with mock.patch("swift.common.constraints.check_mount", _mock_cm):
self.assertRaises(
DiskFileDeviceUnavailable,
self._get_open_disk_file,
mount_check=True)
def test_ondisk_search_loop_ts_meta_data(self):
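        # the newest file is a tombstone, so expect DiskFileDeleted reporting
        # the newest tombstone's timestamp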
df = self._simple_get_diskfile()
self._create_ondisk_file(df, b'', ext='.ts', timestamp=10)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=9)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=8)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=7)
self._create_ondisk_file(df, b'B', ext='.data', timestamp=6)
self._create_ondisk_file(df, b'A', ext='.data', timestamp=5)
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileDeleted) as raised:
df.open()
self.assertEqual(raised.exception.timestamp, Timestamp(10).internal)
def test_ondisk_search_loop_meta_ts_data(self):
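        # .meta files newer than the tombstone don't resurrect the object;
        # the newest tombstone still wins over the older .data files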
df = self._simple_get_diskfile()
self._create_ondisk_file(df, b'', ext='.meta', timestamp=10)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=9)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=8)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=7)
self._create_ondisk_file(df, b'B', ext='.data', timestamp=6)
self._create_ondisk_file(df, b'A', ext='.data', timestamp=5)
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileDeleted) as raised:
df.open()
self.assertEqual(raised.exception.timestamp, Timestamp(8).internal)
def _test_ondisk_search_loop_meta_data_ts(self, legacy_durable=False):
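        # a .data file newer than any tombstone means the object is live;
        # the newest .meta file supplies the X-Timestamp reported on open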
df = self._simple_get_diskfile()
self._create_ondisk_file(df, b'', ext='.meta', timestamp=10)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=9)
self._create_ondisk_file(
df, b'B', ext='.data', legacy_durable=legacy_durable, timestamp=8)
self._create_ondisk_file(
df, b'A', ext='.data', legacy_durable=legacy_durable, timestamp=7)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=6)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=5)
df = self._simple_get_diskfile()
with df.open():
self.assertIn('X-Timestamp', df._metadata)
self.assertEqual(df._metadata['X-Timestamp'],
Timestamp(10).internal)
self.assertNotIn('deleted', df._metadata)
def test_ondisk_search_loop_meta_data_ts(self):
self._test_ondisk_search_loop_meta_data_ts()
def test_ondisk_search_loop_meta_data_ts_legacy_durable(self):
self._test_ondisk_search_loop_meta_data_ts(legacy_durable=True)
def _test_ondisk_search_loop_multiple_meta_data(self,
legacy_durable=False):
df = self._simple_get_diskfile()
self._create_ondisk_file(df, b'', ext='.meta', timestamp=10,
metadata={'X-Object-Meta-User': 'user-meta'})
self._create_ondisk_file(df, b'', ext='.meta', timestamp=9,
ctype_timestamp=9,
metadata={'Content-Type': 'newest',
'X-Object-Meta-User': 'blah'})
self._create_ondisk_file(
df, b'B', ext='.data', legacy_durable=legacy_durable, timestamp=8,
metadata={'Content-Type': 'newer'})
self._create_ondisk_file(
df, b'A', ext='.data', legacy_durable=legacy_durable, timestamp=7,
metadata={'Content-Type': 'oldest'})
df = self._simple_get_diskfile()
with df.open():
self.assertTrue('X-Timestamp' in df._metadata)
self.assertEqual(df._metadata['X-Timestamp'],
Timestamp(10).internal)
self.assertTrue('Content-Type' in df._metadata)
self.assertEqual(df._metadata['Content-Type'], 'newest')
self.assertTrue('X-Object-Meta-User' in df._metadata)
self.assertEqual(df._metadata['X-Object-Meta-User'], 'user-meta')
def test_ondisk_search_loop_multiple_meta_data(self):
self._test_ondisk_search_loop_multiple_meta_data()
def test_ondisk_search_loop_multiple_meta_data_legacy_durable(self):
self._test_ondisk_search_loop_multiple_meta_data(legacy_durable=True)
def _test_ondisk_search_loop_stale_meta_data(self, legacy_durable=False):
df = self._simple_get_diskfile()
self._create_ondisk_file(df, b'', ext='.meta', timestamp=10,
metadata={'X-Object-Meta-User': 'user-meta'})
self._create_ondisk_file(df, b'', ext='.meta', timestamp=9,
ctype_timestamp=7,
metadata={'Content-Type': 'older',
'X-Object-Meta-User': 'blah'})
self._create_ondisk_file(
df, b'B', ext='.data', legacy_durable=legacy_durable, timestamp=8,
metadata={'Content-Type': 'newer'})
df = self._simple_get_diskfile()
with df.open():
self.assertTrue('X-Timestamp' in df._metadata)
self.assertEqual(df._metadata['X-Timestamp'],
Timestamp(10).internal)
self.assertTrue('Content-Type' in df._metadata)
self.assertEqual(df._metadata['Content-Type'], 'newer')
self.assertTrue('X-Object-Meta-User' in df._metadata)
self.assertEqual(df._metadata['X-Object-Meta-User'], 'user-meta')
def test_ondisk_search_loop_stale_meta_data(self):
self._test_ondisk_search_loop_stale_meta_data()
def test_ondisk_search_loop_stale_meta_data_legacy_durable(self):
self._test_ondisk_search_loop_stale_meta_data(legacy_durable=True)
def _test_ondisk_search_loop_data_ts_meta(self, legacy_durable=False):
df = self._simple_get_diskfile()
self._create_ondisk_file(
df, b'B', ext='.data', legacy_durable=legacy_durable, timestamp=10)
self._create_ondisk_file(
df, b'A', ext='.data', legacy_durable=legacy_durable, timestamp=9)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=8)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=7)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=6)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=5)
df = self._simple_get_diskfile()
with df.open():
self.assertIn('X-Timestamp', df._metadata)
self.assertEqual(df._metadata['X-Timestamp'],
Timestamp(10).internal)
self.assertNotIn('deleted', df._metadata)
def test_ondisk_search_loop_data_ts_meta(self):
self._test_ondisk_search_loop_data_ts_meta()
def test_ondisk_search_loop_data_ts_meta_legacy_durable(self):
self._test_ondisk_search_loop_data_ts_meta(legacy_durable=True)
def _test_ondisk_search_loop_wayward_files_ignored(self,
legacy_durable=False):
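        # files with unrecognised extensions (here '.bar') are ignored even
        # though they are newer than everything else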
df = self._simple_get_diskfile()
self._create_ondisk_file(df, b'X', ext='.bar', timestamp=11)
self._create_ondisk_file(
df, b'B', ext='.data', legacy_durable=legacy_durable, timestamp=10)
self._create_ondisk_file(
df, b'A', ext='.data', legacy_durable=legacy_durable, timestamp=9)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=8)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=7)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=6)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=5)
df = self._simple_get_diskfile()
with df.open():
self.assertIn('X-Timestamp', df._metadata)
self.assertEqual(df._metadata['X-Timestamp'],
Timestamp(10).internal)
self.assertNotIn('deleted', df._metadata)
def test_ondisk_search_loop_wayward_files_ignored(self):
self._test_ondisk_search_loop_wayward_files_ignored()
def test_ondisk_search_loop_wayward_files_ignored_legacy_durable(self):
self._test_ondisk_search_loop_wayward_files_ignored(
legacy_durable=True)
def _test_ondisk_search_loop_listdir_error(self, legacy_durable=False):
df = self._simple_get_diskfile()
def mock_listdir_exp(*args, **kwargs):
raise OSError(errno.EACCES, os.strerror(errno.EACCES))
with mock.patch("os.listdir", mock_listdir_exp):
self._create_ondisk_file(df, b'X', ext='.bar', timestamp=11)
self._create_ondisk_file(df, b'B', ext='.data', timestamp=10,
legacy_durable=legacy_durable)
self._create_ondisk_file(df, b'A', ext='.data', timestamp=9,
legacy_durable=legacy_durable)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=8)
self._create_ondisk_file(df, b'', ext='.ts', timestamp=7)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=6)
self._create_ondisk_file(df, b'', ext='.meta', timestamp=5)
df = self._simple_get_diskfile()
self.assertRaises(DiskFileError, df.open)
def test_ondisk_search_loop_listdir_error(self):
self._test_ondisk_search_loop_listdir_error()
def test_ondisk_search_loop_listdir_error_legacy_durable(self):
self._test_ondisk_search_loop_listdir_error(legacy_durable=True)
def test_exception_in_handle_close_quarantine(self):
df = self._get_open_disk_file()
def blow_up():
raise Exception('a very special error')
reader = df.reader()
reader._handle_close_quarantine = blow_up
for _ in reader:
pass
reader.close()
log_lines = df._logger.get_lines_for_level('error')
self.assertIn('a very special error', log_lines[-1])
def test_diskfile_names(self):
df = self._simple_get_diskfile()
self.assertEqual(df.account, 'a')
self.assertEqual(df.container, 'c')
self.assertEqual(df.obj, 'o')
def test_diskfile_content_length_not_open(self):
df = self._simple_get_diskfile()
exc = None
try:
df.content_length
except DiskFileNotOpen as err:
exc = err
self.assertEqual(str(exc), '')
def test_diskfile_content_length_deleted(self):
df = self._get_open_disk_file()
ts = time()
df.delete(ts)
exp_name = '%s.ts' % str(Timestamp(ts).internal)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), 1)
self.assertIn(exp_name, set(dl))
df = self._simple_get_diskfile()
exc = None
try:
with df.open():
df.content_length
except DiskFileDeleted as err:
exc = err
self.assertEqual(str(exc), '')
def test_diskfile_content_length(self):
self._get_open_disk_file()
df = self._simple_get_diskfile()
with df.open():
if df.policy.policy_type == EC_POLICY:
expected = df.policy.pyeclib_driver.get_segment_info(
1024, df.policy.ec_segment_size)['fragment_size']
else:
expected = 1024
self.assertEqual(df.content_length, expected)
def test_diskfile_timestamp_not_open(self):
df = self._simple_get_diskfile()
exc = None
try:
df.timestamp
except DiskFileNotOpen as err:
exc = err
self.assertEqual(str(exc), '')
def test_diskfile_timestamp_deleted(self):
df = self._get_open_disk_file()
ts = time()
df.delete(ts)
exp_name = '%s.ts' % str(Timestamp(ts).internal)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), 1)
self.assertIn(exp_name, set(dl))
df = self._simple_get_diskfile()
exc = None
try:
with df.open():
df.timestamp
except DiskFileDeleted as err:
exc = err
self.assertEqual(str(exc), '')
def test_diskfile_timestamp(self):
ts_1 = self.ts()
self._get_open_disk_file(ts=ts_1.internal)
df = self._simple_get_diskfile()
with df.open():
self.assertEqual(df.timestamp, ts_1.internal)
ts_2 = self.ts()
df.write_metadata({'X-Timestamp': ts_2.internal})
with df.open():
self.assertEqual(df.timestamp, ts_2.internal)
def test_data_timestamp(self):
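        # data_timestamp tracks the .data file and, unlike timestamp, is not
        # advanced by a later write_metadata()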
ts_1 = self.ts()
self._get_open_disk_file(ts=ts_1.internal)
df = self._simple_get_diskfile()
with df.open():
self.assertEqual(df.data_timestamp, ts_1.internal)
ts_2 = self.ts()
df.write_metadata({'X-Timestamp': ts_2.internal})
with df.open():
self.assertEqual(df.data_timestamp, ts_1.internal)
def test_data_timestamp_not_open(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
df.data_timestamp
def test_content_type_and_timestamp(self):
ts_1 = self.ts()
self._get_open_disk_file(ts=ts_1.internal,
extra_metadata={'Content-Type': 'image/jpeg'})
df = self._simple_get_diskfile()
with df.open():
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.timestamp)
self.assertEqual(ts_1.internal, df.content_type_timestamp)
self.assertEqual('image/jpeg', df.content_type)
ts_2 = self.ts()
ts_3 = self.ts()
df.write_metadata({'X-Timestamp': ts_3.internal,
'Content-Type': 'image/gif',
'Content-Type-Timestamp': ts_2.internal})
with df.open():
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_3.internal, df.timestamp)
self.assertEqual(ts_2.internal, df.content_type_timestamp)
self.assertEqual('image/gif', df.content_type)
def test_content_type_timestamp_not_open(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
df.content_type_timestamp
def test_content_type_not_open(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
df.content_type
def _do_test_durable_timestamp(self, legacy_durable):
ts_1 = self.ts()
df = self._simple_get_diskfile(frag_index=2)
write_diskfile(df, ts_1, legacy_durable=legacy_durable)
# get a new instance of the diskfile to ensure timestamp variable is
# set by the open() and not just the write operations
df = self._simple_get_diskfile(frag_index=2)
with df.open():
self.assertEqual(df.durable_timestamp, ts_1.internal)
# verify durable timestamp does not change when metadata is written
ts_2 = self.ts()
df.write_metadata({'X-Timestamp': ts_2.internal})
with df.open():
self.assertEqual(df.durable_timestamp, ts_1.internal)
def test_durable_timestamp(self):
self._do_test_durable_timestamp(False)
def test_durable_timestamp_not_open(self):
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotOpen):
df.durable_timestamp
def test_durable_timestamp_no_data_file(self):
df = self._get_open_disk_file(self.ts().internal)
for f in os.listdir(df._datadir):
if f.endswith('.data'):
os.unlink(os.path.join(df._datadir, f))
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotExist):
df.open()
# open() was attempted, but no data file so expect None
self.assertIsNone(df.durable_timestamp)
def test_error_in_cleanup_ondisk_files(self):
def mock_cleanup(*args, **kwargs):
raise OSError()
df = self._get_open_disk_file()
file_count = len(os.listdir(df._datadir))
ts = time()
with mock.patch(
self._manager_mock('cleanup_ondisk_files'), mock_cleanup):
# Expect to swallow the OSError
df.delete(ts)
exp_name = '%s.ts' % str(Timestamp(ts).internal)
dl = os.listdir(df._datadir)
self.assertEqual(len(dl), file_count + 1)
self.assertIn(exp_name, set(dl))
def _system_can_zero_copy(self):
if not splice.available:
return False
try:
utils.get_md5_socket()
except IOError:
return False
return True
def test_zero_copy_cache_dropping(self):
if not self._system_can_zero_copy():
raise unittest.SkipTest("zero-copy support is missing")
self.conf['splice'] = 'on'
self.conf['keep_cache_size'] = 16384
self.conf['disk_chunk_size'] = 4096
df = self._get_open_disk_file(fsize=163840)
reader = df.reader()
self.assertTrue(reader.can_zero_copy_send())
with mock.patch("swift.obj.diskfile.drop_buffer_cache") as dbc:
with mock.patch("swift.obj.diskfile.DROP_CACHE_WINDOW", 4095):
with open('/dev/null', 'w') as devnull:
reader.zero_copy_send(devnull.fileno())
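        # the EC fragment archive on disk is much smaller than the fully
        # replicated object, so fewer drop_buffer_cache calls are expected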
if df.policy.policy_type == EC_POLICY:
expected = 4 + 1
else:
expected = (4 * 10) + 1
self.assertEqual(len(dbc.mock_calls), expected)
def test_zero_copy_turns_off_when_md5_sockets_not_supported(self):
if not self._system_can_zero_copy():
raise unittest.SkipTest("zero-copy support is missing")
df_mgr = self.df_router[POLICIES.default]
self.conf['splice'] = 'on'
with mock.patch('swift.obj.diskfile.get_md5_socket') as mock_md5sock:
mock_md5sock.side_effect = IOError(
errno.EAFNOSUPPORT, "MD5 socket busted")
df = self._get_open_disk_file(fsize=128)
reader = df.reader()
self.assertFalse(reader.can_zero_copy_send())
log_lines = df_mgr.logger.get_lines_for_level('warning')
self.assertIn('MD5 sockets', log_lines[-1])
def test_tee_to_md5_pipe_length_mismatch(self):
if not self._system_can_zero_copy():
raise unittest.SkipTest("zero-copy support is missing")
self.conf['splice'] = 'on'
df = self._get_open_disk_file(fsize=16385)
reader = df.reader()
self.assertTrue(reader.can_zero_copy_send())
with mock.patch('swift.obj.diskfile.tee') as mock_tee:
mock_tee.side_effect = lambda _1, _2, _3, cnt: cnt - 1
with open('/dev/null', 'w') as devnull:
exc_re = (r'tee\(\) failed: tried to move \d+ bytes, but only '
r'moved -?\d+')
try:
reader.zero_copy_send(devnull.fileno())
except Exception as e:
self.assertTrue(re.match(exc_re, str(e)))
else:
self.fail('Expected Exception was not raised')
def test_splice_to_wsockfd_blocks(self):
if not self._system_can_zero_copy():
raise unittest.SkipTest("zero-copy support is missing")
self.conf['splice'] = 'on'
df = self._get_open_disk_file(fsize=16385)
reader = df.reader()
self.assertTrue(reader.can_zero_copy_send())
def _run_test():
# Set up mock of `splice`
splice_called = [False] # State hack
def fake_splice(fd_in, off_in, fd_out, off_out, len_, flags):
if fd_out == devnull.fileno() and not splice_called[0]:
splice_called[0] = True
err = errno.EWOULDBLOCK
raise IOError(err, os.strerror(err))
return splice(fd_in, off_in, fd_out, off_out,
len_, flags)
mock_splice.side_effect = fake_splice
# Set up mock of `trampoline`
# There are 2 reasons to mock this:
#
# - We want to ensure it's called with the expected arguments at
# least once
# - When called with our write FD (which points to `/dev/null`), we
# can't actually call `trampoline`, because adding such FD to an
# `epoll` handle results in `EPERM`
def fake_trampoline(fd, read=None, write=None, timeout=None,
timeout_exc=timeout.Timeout,
mark_as_closed=None):
if write and fd == devnull.fileno():
return
else:
hubs.trampoline(fd, read=read, write=write,
timeout=timeout, timeout_exc=timeout_exc,
mark_as_closed=mark_as_closed)
mock_trampoline.side_effect = fake_trampoline
reader.zero_copy_send(devnull.fileno())
# Assert the end of `zero_copy_send` was reached
self.assertTrue(mock_close.called)
# Assert there was at least one call to `trampoline` waiting for
# `write` access to the output FD
mock_trampoline.assert_any_call(devnull.fileno(), write=True)
# Assert at least one call to `splice` with the output FD we expect
for call in mock_splice.call_args_list:
args = call[0]
if args[2] == devnull.fileno():
break
else:
self.fail('`splice` not called with expected arguments')
with mock.patch('swift.obj.diskfile.splice') as mock_splice:
with mock.patch.object(
reader, 'close', side_effect=reader.close) as mock_close:
with open('/dev/null', 'w') as devnull:
with mock.patch('swift.obj.diskfile.trampoline') as \
mock_trampoline:
_run_test()
def test_create_unlink_cleanup_DiskFileNoSpace(self):
# Test cleanup when DiskFileNoSpace() is raised.
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
'xyz', policy=POLICIES.legacy)
df.manager.use_linkat = False
_m_fallocate = mock.MagicMock(side_effect=OSError(errno.ENOSPC,
os.strerror(errno.ENOSPC)))
_m_unlink = mock.Mock()
with mock.patch("swift.obj.diskfile.fallocate", _m_fallocate):
with mock.patch("os.unlink", _m_unlink):
try:
with df.create(size=100):
pass
except DiskFileNoSpace:
pass
else:
self.fail("Expected exception DiskFileNoSpace")
self.assertTrue(_m_fallocate.called)
self.assertTrue(_m_unlink.called)
self.assertNotIn('error', self.logger.all_log_lines())
def test_create_unlink_cleanup_renamer_fails(self):
# Test cleanup when renamer fails
_m_renamer = mock.MagicMock(side_effect=OSError(errno.ENOENT,
os.strerror(errno.ENOENT)))
_m_unlink = mock.Mock()
df = self._simple_get_diskfile()
df.manager.use_linkat = False
data = b'0' * 100
metadata = {
'ETag': md5(data).hexdigest(),
'X-Timestamp': Timestamp.now().internal,
'Content-Length': str(100),
}
with mock.patch("swift.obj.diskfile.renamer", _m_renamer):
with mock.patch("os.unlink", _m_unlink):
try:
with df.create(size=100) as writer:
writer.write(data)
writer.put(metadata)
except OSError:
pass
else:
self.fail("Expected OSError exception")
self.assertFalse(writer._put_succeeded)
self.assertTrue(_m_renamer.called)
self.assertTrue(_m_unlink.called)
self.assertNotIn('error', self.logger.all_log_lines())
def test_create_unlink_cleanup_logging(self):
# Test logging of os.unlink() failures.
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
'xyz', policy=POLICIES.legacy)
df.manager.use_linkat = False
_m_fallocate = mock.MagicMock(side_effect=OSError(errno.ENOSPC,
os.strerror(errno.ENOSPC)))
_m_unlink = mock.MagicMock(side_effect=OSError(errno.ENOENT,
os.strerror(errno.ENOENT)))
with mock.patch("swift.obj.diskfile.fallocate", _m_fallocate):
with mock.patch("os.unlink", _m_unlink):
try:
with df.create(size=100):
pass
except DiskFileNoSpace:
pass
else:
self.fail("Expected exception DiskFileNoSpace")
self.assertTrue(_m_fallocate.called)
self.assertTrue(_m_unlink.called)
error_lines = self.logger.get_lines_for_level('error')
for line in error_lines:
self.assertTrue(line.startswith("Error removing tempfile:"))
@requires_o_tmpfile_support_in_tmp
def test_get_tempfile_use_linkat_os_open_called(self):
df = self._simple_get_diskfile()
self.assertTrue(df.manager.use_linkat)
_m_mkstemp = mock.MagicMock()
_m_os_open = mock.Mock(return_value=12345)
_m_mkc = mock.Mock()
with mock.patch("swift.obj.diskfile.mkstemp", _m_mkstemp):
with mock.patch("swift.obj.diskfile.os.open", _m_os_open):
with mock.patch("swift.obj.diskfile.makedirs_count", _m_mkc):
writer = df.writer()
fd, tmppath = writer._get_tempfile()
self.assertTrue(_m_mkc.called)
flags = O_TMPFILE | os.O_WRONLY
_m_os_open.assert_called_once_with(df._datadir, flags)
self.assertIsNone(tmppath)
self.assertEqual(fd, 12345)
self.assertFalse(_m_mkstemp.called)
@requires_o_tmpfile_support_in_tmp
def test_get_tempfile_fallback_to_mkstemp(self):
df = self._simple_get_diskfile()
df._logger = debug_logger()
self.assertTrue(df.manager.use_linkat)
for err in (errno.EOPNOTSUPP, errno.EISDIR, errno.EINVAL):
df.manager.use_linkat = True
_m_open = mock.Mock(side_effect=OSError(err, os.strerror(err)))
_m_mkstemp = mock.MagicMock(return_value=(0, "blah"))
_m_mkc = mock.Mock()
with mock.patch("swift.obj.diskfile.os.open", _m_open):
with mock.patch("swift.obj.diskfile.mkstemp", _m_mkstemp):
with mock.patch("swift.obj.diskfile.makedirs_count",
_m_mkc):
writer = df.writer()
fd, tmppath = writer._get_tempfile()
self.assertTrue(_m_mkc.called)
# Fallback should succeed and mkstemp() should be called.
self.assertTrue(_m_mkstemp.called)
self.assertEqual(tmppath, "blah")
            # Once opening a file with O_TMPFILE has failed, the failure is
            # cached so we do not try again
self.assertFalse(df.manager.use_linkat)
# Now that we try to use O_TMPFILE all the time, log at debug
# instead of warning
log = df.manager.logger.get_lines_for_level('warning')
self.assertFalse(log)
log = df.manager.logger.get_lines_for_level('debug')
self.assertGreater(len(log), 0)
self.assertTrue('O_TMPFILE' in log[-1])
@requires_o_tmpfile_support_in_tmp
def test_get_tmpfile_os_open_other_exceptions_are_raised(self):
df = self._simple_get_diskfile()
_m_open = mock.Mock(side_effect=OSError(errno.ENOSPC,
os.strerror(errno.ENOSPC)))
_m_mkstemp = mock.MagicMock()
_m_mkc = mock.Mock()
with mock.patch("swift.obj.diskfile.os.open", _m_open):
with mock.patch("swift.obj.diskfile.mkstemp", _m_mkstemp):
with mock.patch("swift.obj.diskfile.makedirs_count", _m_mkc):
try:
writer = df.writer()
fd, tmppath = writer._get_tempfile()
except OSError as err:
self.assertEqual(err.errno, errno.ENOSPC)
else:
self.fail("Expecting ENOSPC")
self.assertTrue(_m_mkc.called)
# mkstemp() should not be invoked.
self.assertFalse(_m_mkstemp.called)
@requires_o_tmpfile_support_in_tmp
def test_create_use_linkat_renamer_not_called(self):
df = self._simple_get_diskfile()
data = b'0' * 100
metadata = {
'ETag': md5(data).hexdigest(),
'X-Timestamp': Timestamp.now().internal,
'Content-Length': str(100),
}
_m_renamer = mock.Mock()
with mock.patch("swift.obj.diskfile.renamer", _m_renamer):
with df.create(size=100) as writer:
writer.write(data)
writer.put(metadata)
self.assertTrue(writer._put_succeeded)
self.assertFalse(_m_renamer.called)
@patch_policies(test_policies)
class TestDiskFile(DiskFileMixin, unittest.TestCase):
mgr_cls = diskfile.DiskFileManager
@patch_policies(with_ec_default=True)
class TestECDiskFile(DiskFileMixin, unittest.TestCase):
mgr_cls = diskfile.ECDiskFileManager
def _test_commit_raises_DiskFileError_for_rename_error(self, fake_err):
df = self._simple_get_diskfile(account='a', container='c',
obj='o_rename_err',
policy=POLICIES.default)
timestamp = Timestamp.now()
with df.create() as writer:
metadata = {
'ETag': 'bogus_etag',
'X-Timestamp': timestamp.internal,
'Content-Length': '0',
}
writer.put(metadata)
with mock.patch('swift.obj.diskfile.os.rename',
side_effect=fake_err):
with self.assertRaises(DiskFileError) as cm:
writer.commit(timestamp)
dl = os.listdir(df._datadir)
datafile = _make_datafilename(
timestamp, POLICIES.default, frag_index=2, durable=False)
self.assertEqual([datafile], dl)
return df, cm.exception
def test_commit_raises_DiskFileError_for_rename_ENOSPC_IOError(self):
df, exc = self._test_commit_raises_DiskFileError_for_rename_error(
IOError(errno.ENOSPC, 'ENOSPC'))
self.assertIsInstance(exc, DiskFileNoSpace)
self.assertIn('No space left on device', str(exc))
self.assertIn('No space left on device',
df.manager.logger.get_lines_for_level('error')[0])
self.assertFalse(df.manager.logger.get_lines_for_level('error')[1:])
def test_commit_raises_DiskFileError_for_rename_EDQUOT_IOError(self):
df, exc = self._test_commit_raises_DiskFileError_for_rename_error(
IOError(errno.EDQUOT, 'EDQUOT'))
self.assertIsInstance(exc, DiskFileNoSpace)
self.assertIn('No space left on device', str(exc))
self.assertIn('No space left on device',
df.manager.logger.get_lines_for_level('error')[0])
self.assertFalse(df.manager.logger.get_lines_for_level('error')[1:])
def test_commit_raises_DiskFileError_for_rename_other_IOError(self):
df, exc = self._test_commit_raises_DiskFileError_for_rename_error(
IOError(21, 'Some other IO Error'))
self.assertIn('Problem making data file durable', str(exc))
self.assertIn('Problem making data file durable',
df.manager.logger.get_lines_for_level('error')[0])
self.assertFalse(df.manager.logger.get_lines_for_level('error')[1:])
def test_commit_raises_DiskFileError_for_rename_OSError(self):
df, exc = self._test_commit_raises_DiskFileError_for_rename_error(
OSError(100, 'Some Error'))
self.assertIn('Problem making data file durable', str(exc))
self.assertIn('Problem making data file durable',
df.manager.logger.get_lines_for_level('error')[0])
self.assertFalse(df.manager.logger.get_lines_for_level('error')[1:])
def _test_commit_raises_DiskFileError_for_fsync_dir_errors(self, fake_err):
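        # unlike a rename failure, the data file has already been renamed to
        # its durable name before fsync_dir fails, so a durable data file is
        # left in the datadir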
df = self._simple_get_diskfile(account='a', container='c',
obj='o_fsync_dir_err',
policy=POLICIES.default)
timestamp = Timestamp.now()
with df.create() as writer:
metadata = {
'ETag': 'bogus_etag',
'X-Timestamp': timestamp.internal,
'Content-Length': '0',
}
writer.put(metadata)
with mock.patch('swift.obj.diskfile.fsync_dir',
side_effect=fake_err):
with self.assertRaises(DiskFileError) as cm:
writer.commit(timestamp)
dl = os.listdir(df._datadir)
datafile = _make_datafilename(
timestamp, POLICIES.default, frag_index=2, durable=True)
self.assertEqual([datafile], dl)
self.assertIn('Problem making data file durable', str(cm.exception))
self.assertIn('Problem making data file durable',
df.manager.logger.get_lines_for_level('error')[0])
self.assertFalse(df.manager.logger.get_lines_for_level('error')[1:])
def test_commit_raises_DiskFileError_for_fsync_dir_IOError(self):
self._test_commit_raises_DiskFileError_for_fsync_dir_errors(
IOError(21, 'Some IO Error'))
def test_commit_raises_DiskFileError_for_fsync_dir_OSError(self):
self._test_commit_raises_DiskFileError_for_fsync_dir_errors(
OSError(100, 'Some Error'))
def test_data_file_has_frag_index(self):
policy = POLICIES.default
for good_value in (0, '0', 2, '2', 13, '13'):
# frag_index set by constructor arg
ts = self.ts()
expected = [_make_datafilename(
ts, policy, good_value, durable=True)]
df = self._get_open_disk_file(ts=ts, policy=policy,
frag_index=good_value)
self.assertEqual(expected, sorted(os.listdir(df._datadir)))
# frag index should be added to object sysmeta
actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
self.assertEqual(int(good_value), int(actual))
# metadata value overrides the constructor arg
ts = self.ts()
expected = [_make_datafilename(
ts, policy, good_value, durable=True)]
meta = {'X-Object-Sysmeta-Ec-Frag-Index': good_value}
df = self._get_open_disk_file(ts=ts, policy=policy,
frag_index='3',
extra_metadata=meta)
self.assertEqual(expected, sorted(os.listdir(df._datadir)))
actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
self.assertEqual(int(good_value), int(actual))
# metadata value alone is sufficient
ts = self.ts()
expected = [_make_datafilename(
ts, policy, good_value, durable=True)]
meta = {'X-Object-Sysmeta-Ec-Frag-Index': good_value}
df = self._get_open_disk_file(ts=ts, policy=policy,
frag_index=None,
extra_metadata=meta)
self.assertEqual(expected, sorted(os.listdir(df._datadir)))
actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
self.assertEqual(int(good_value), int(actual))
def test_sysmeta_frag_index_is_immutable(self):
# the X-Object-Sysmeta-Ec-Frag-Index should *only* be set when
# the .data file is written.
policy = POLICIES.default
orig_frag_index = 13
# frag_index set by constructor arg
ts = self.ts()
expected = [_make_datafilename(
ts, policy, frag_index=orig_frag_index, durable=True)]
df = self._get_open_disk_file(ts=ts, policy=policy, obj_name='my_obj',
frag_index=orig_frag_index)
self.assertEqual(expected, sorted(os.listdir(df._datadir)))
# frag index should be added to object sysmeta
actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
self.assertEqual(int(orig_frag_index), int(actual))
# open the same diskfile with no frag_index passed to constructor
df = self.df_router[policy].get_diskfile(
self.existing_device, 0, 'a', 'c', 'my_obj', policy=policy,
frag_index=None)
df.open()
actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
self.assertEqual(int(orig_frag_index), int(actual))
# write metadata to a meta file
ts = self.ts()
metadata = {'X-Timestamp': ts.internal,
'X-Object-Meta-Fruit': 'kiwi'}
df.write_metadata(metadata)
# sanity check we did write a meta file
expected.append('%s.meta' % ts.internal)
actual_files = sorted(os.listdir(df._datadir))
self.assertEqual(expected, actual_files)
# open the same diskfile, check frag index is unchanged
df = self.df_router[policy].get_diskfile(
self.existing_device, 0, 'a', 'c', 'my_obj', policy=policy,
frag_index=None)
df.open()
# sanity check we have read the meta file
self.assertEqual(ts, df.get_metadata().get('X-Timestamp'))
self.assertEqual('kiwi', df.get_metadata().get('X-Object-Meta-Fruit'))
# check frag index sysmeta is unchanged
actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
self.assertEqual(int(orig_frag_index), int(actual))
# attempt to overwrite frag index sysmeta
ts = self.ts()
metadata = {'X-Timestamp': ts.internal,
'X-Object-Sysmeta-Ec-Frag-Index': 99,
'X-Object-Meta-Fruit': 'apple'}
df.write_metadata(metadata)
# open the same diskfile, check frag index is unchanged
df = self.df_router[policy].get_diskfile(
self.existing_device, 0, 'a', 'c', 'my_obj', policy=policy,
frag_index=None)
df.open()
# sanity check we have read the meta file
self.assertEqual(ts, df.get_metadata().get('X-Timestamp'))
self.assertEqual('apple', df.get_metadata().get('X-Object-Meta-Fruit'))
actual = df.get_metadata().get('X-Object-Sysmeta-Ec-Frag-Index')
self.assertEqual(int(orig_frag_index), int(actual))
def test_data_file_errors_bad_frag_index(self):
policy = POLICIES.default
df_mgr = self.df_router[policy]
for bad_value in ('foo', '-2', -2, '3.14', 3.14, '14', 14, '999'):
# check that bad frag_index set by constructor arg raises error
# as soon as diskfile is constructed, before data is written
self.assertRaises(DiskFileError, self._simple_get_diskfile,
policy=policy, frag_index=bad_value)
# bad frag_index set by metadata value
# (drive-by check that it is ok for constructor arg to be None)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_index=None)
ts = self.ts()
meta = {'X-Object-Sysmeta-Ec-Frag-Index': bad_value,
'X-Timestamp': ts.internal,
'Content-Length': 0,
'Etag': EMPTY_ETAG,
'Content-Type': 'plain/text'}
with df.create() as writer:
try:
writer.put(meta)
self.fail('Expected DiskFileError for frag_index %s'
% bad_value)
except DiskFileError:
pass
# bad frag_index set by metadata value overrides ok constructor arg
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_index=2)
ts = self.ts()
meta = {'X-Object-Sysmeta-Ec-Frag-Index': bad_value,
'X-Timestamp': ts.internal,
'Content-Length': 0,
'Etag': EMPTY_ETAG,
'Content-Type': 'plain/text'}
with df.create() as writer:
try:
writer.put(meta)
self.fail('Expected DiskFileError for frag_index %s'
% bad_value)
except DiskFileError:
pass
def test_purge_one_fragment_index(self):
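        # purging one fragment index removes only that index's .data file;
        # other fragments and the .meta file are left in place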
ts = self.ts()
for frag_index in (1, 2):
df = self._simple_get_diskfile(frag_index=frag_index)
write_diskfile(df, ts)
ts_meta = self.ts()
df.write_metadata({
'X-Timestamp': ts_meta.internal,
'X-Object-Meta-Delete': 'me'
})
# sanity
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '#1#d.data',
ts.internal + '#2#d.data',
ts_meta.internal + '.meta',
])
df.purge(ts, 2)
# by default .meta file is not purged
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '#1#d.data',
ts_meta.internal + '.meta',
])
def test_purge_final_fragment_index_and_meta(self):
ts = self.ts()
df = self._simple_get_diskfile(frag_index=1)
write_diskfile(df, ts)
ts_meta = self.ts()
df.write_metadata({
'X-Timestamp': ts_meta.internal,
'X-Object-Meta-Delete': 'me',
})
# sanity
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '#1#d.data',
ts_meta.internal + '.meta',
])
df.purge(ts, 1, meta_timestamp=ts_meta)
self.assertFalse(os.path.exists(df._datadir))
def test_purge_final_fragment_index_and_not_meta(self):
ts = self.ts()
df = self._simple_get_diskfile(frag_index=1)
write_diskfile(df, ts)
ts_meta = self.ts()
df.write_metadata({
'X-Timestamp': ts_meta.internal,
'X-Object-Meta-Delete': 'me',
})
# sanity
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '#1#d.data',
ts_meta.internal + '.meta',
])
df.purge(ts, 1, meta_timestamp=ts)
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts_meta.internal + '.meta',
])
def test_purge_last_fragment_index(self):
ts = self.ts()
frag_index = 0
df = self._simple_get_diskfile(frag_index=frag_index)
write_diskfile(df, ts)
# sanity
self.assertEqual(os.listdir(df._datadir), [
ts.internal + '#0#d.data',
])
df.purge(ts, frag_index)
self.assertFalse(os.path.exists(df._datadir))
def test_purge_last_fragment_index_legacy_durable(self):
# a legacy durable file doesn't get purged in case another fragment is
# relying on it for durability
ts = self.ts()
frag_index = 0
df = self._simple_get_diskfile(frag_index=frag_index)
write_diskfile(df, ts, legacy_durable=True)
# sanity
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '#0.data',
ts.internal + '.durable',
])
df.purge(ts, frag_index)
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '.durable',
])
def test_purge_non_existent_fragment_index(self):
ts = self.ts()
frag_index = 7
df = self._simple_get_diskfile(frag_index=frag_index)
write_diskfile(df, ts)
# sanity
self.assertEqual(os.listdir(df._datadir), [
ts.internal + '#7#d.data',
])
df.purge(ts, 3)
# no effect
self.assertEqual(os.listdir(df._datadir), [
ts.internal + '#7#d.data',
])
def test_purge_old_timestamp_frag_index(self):
old_ts = self.ts()
ts = self.ts()
frag_index = 1
df = self._simple_get_diskfile(frag_index=frag_index)
write_diskfile(df, ts)
# sanity
self.assertEqual(os.listdir(df._datadir), [
ts.internal + '#1#d.data',
])
df.purge(old_ts, 1)
# no effect
self.assertEqual(os.listdir(df._datadir), [
ts.internal + '#1#d.data',
])
def test_purge_tombstone(self):
ts = self.ts()
df = self._simple_get_diskfile(frag_index=3)
df.delete(ts)
# sanity
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '.ts',
])
df.purge(ts, 3)
self.assertFalse(os.path.exists(df._datadir))
def test_purge_without_frag(self):
ts = self.ts()
df = self._simple_get_diskfile()
df.delete(ts)
# sanity
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '.ts',
])
df.purge(ts, None)
self.assertEqual(sorted(os.listdir(df._datadir)), [])
def test_purge_old_tombstone(self):
old_ts = self.ts()
ts = self.ts()
df = self._simple_get_diskfile(frag_index=5)
df.delete(ts)
# sanity
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '.ts',
])
df.purge(old_ts, 5)
# no effect
self.assertEqual(sorted(os.listdir(df._datadir)), [
ts.internal + '.ts',
])
def test_purge_already_removed(self):
df = self._simple_get_diskfile(frag_index=6)
df.purge(self.ts(), 6) # no errors
# sanity
os.makedirs(df._datadir)
self.assertEqual(sorted(os.listdir(df._datadir)), [])
df.purge(self.ts(), 6)
# the directory was empty and has been removed
self.assertFalse(os.path.exists(df._datadir))
def _do_test_open_most_recent_durable(self, legacy_durable):
policy = POLICIES.default
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
ts = self.ts()
write_diskfile(df, ts, frag_index=3,
legacy_durable=legacy_durable)
metadata = {
            'ETag': md5(b'test data').hexdigest(),
'X-Timestamp': ts.internal,
'Content-Length': str(len('test data')),
'X-Object-Sysmeta-Ec-Etag': 'fake-etag',
'X-Object-Sysmeta-Ec-Frag-Index': '3',
}
# add some .meta stuff
extra_meta = {
'X-Object-Meta-Foo': 'Bar',
'X-Timestamp': self.ts().internal,
}
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
df.write_metadata(extra_meta)
# sanity
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
metadata.update(extra_meta)
self.assertEqual(metadata, df.read_metadata())
# add a newer datafile
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
ts = self.ts()
write_diskfile(df, ts, frag_index=3, commit=False,
legacy_durable=legacy_durable)
# N.B. don't make it durable
# and we still get the old metadata (same as if no .data!)
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
self.assertEqual(metadata, df.read_metadata())
def test_open_most_recent_durable(self):
self._do_test_open_most_recent_durable(False)
def test_open_most_recent_durable_legacy(self):
self._do_test_open_most_recent_durable(True)
def test_open_most_recent_missing_durable(self):
policy = POLICIES.default
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
self.assertRaises(DiskFileNotExist, df.read_metadata)
# now create a datafile missing durable
ts = self.ts()
write_diskfile(df, ts, frag_index=3, commit=False)
# add some .meta stuff
extra_meta = {
'X-Object-Meta-Foo': 'Bar',
'X-Timestamp': self.ts().internal,
}
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
df.write_metadata(extra_meta)
# we still get the DiskFileNotExist (same as if no .data!)
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy,
frag_index=3)
self.assertRaises(DiskFileNotExist, df.read_metadata)
# sanity, without the frag_index kwarg
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
self.assertRaises(DiskFileNotExist, df.read_metadata)
def test_fragments(self):
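        # the fragments property maps each timestamp found on disk to the
        # fragment indexes present at that timestamp, durable or not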
ts_1 = self.ts()
self._get_open_disk_file(ts=ts_1.internal, frag_index=0)
df = self._get_open_disk_file(ts=ts_1.internal, frag_index=2)
self.assertEqual(df.fragments, {ts_1: [0, 2]})
# now add a newer datafile for frag index 3 but don't write a
# durable with it (so ignore the error when we try to open)
ts_2 = self.ts()
try:
df = self._get_open_disk_file(ts=ts_2.internal, frag_index=3,
commit=False)
except DiskFileNotExist:
pass
# sanity check: should have 3* .data
files = os.listdir(df._datadir)
self.assertEqual(3, len(files))
with df.open():
self.assertEqual(df.fragments, {ts_1: [0, 2], ts_2: [3]})
def test_fragments_available_when_not_durable(self):
# verify frags available even if open fails e.g. if none are durable
ts_1 = self.ts()
ts_2 = self.ts()
for ts, fi in ((ts_1, 0), (ts_1, 2), (ts_2, 3)):
try:
df = self._get_open_disk_file(
ts=ts, frag_index=fi, commit=False)
except DiskFileNotExist:
pass
df = self._simple_get_diskfile()
# sanity check: should have 3* .data
files = os.listdir(df._datadir)
self.assertEqual(3, len(files))
self.assertRaises(DiskFileNotExist, df.open)
self.assertEqual(df.fragments, {ts_1: [0, 2], ts_2: [3]})
def test_fragments_not_open(self):
df = self._simple_get_diskfile()
self.assertIsNone(df.fragments)
def test_durable_timestamp_when_not_durable(self):
try:
self._get_open_disk_file(self.ts().internal, commit=False)
except DiskFileNotExist:
pass
df = self._simple_get_diskfile()
with self.assertRaises(DiskFileNotExist):
df.open()
# open() was attempted, but no durable file so expect None
self.assertIsNone(df.durable_timestamp)
def test_durable_timestamp_missing_frag_index(self):
ts1 = self.ts()
self._get_open_disk_file(ts=ts1.internal, frag_index=1)
df = self._simple_get_diskfile(frag_index=2)
with self.assertRaises(DiskFileNotExist):
df.open()
# open() was attempted, but no data file for frag index so expect None
self.assertIsNone(df.durable_timestamp)
def test_durable_timestamp_newer_non_durable_data_file(self):
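        # durable_timestamp should report the newest durable .data file even
        # when a newer non-durable .data file exists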
ts1 = self.ts()
self._get_open_disk_file(ts=ts1.internal)
ts2 = self.ts()
try:
self._get_open_disk_file(ts=ts2.internal, commit=False)
except DiskFileNotExist:
pass
df = self._simple_get_diskfile()
# sanity check - two .data files
self.assertEqual(2, len(os.listdir(df._datadir)))
df.open()
self.assertEqual(ts1, df.durable_timestamp)
def test_durable_timestamp_legacy_durable(self):
self._do_test_durable_timestamp(True)
def _test_open_with_fragment_preferences(self, legacy_durable=False):
policy = POLICIES.default
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
ts_1, ts_2, ts_3, ts_4 = (self.ts() for _ in range(4))
# create two durable frags, first with index 0
frag_0_metadata = write_diskfile(df, ts_1, frag_index=0,
legacy_durable=legacy_durable)
# second with index 3
frag_3_metadata = write_diskfile(df, ts_1, frag_index=3,
legacy_durable=legacy_durable)
# sanity check: should have 2 * .data plus possibly a .durable
self.assertEqual(3 if legacy_durable else 2,
len(os.listdir(df._datadir)))
# add some .meta stuff
meta_1_metadata = {
'X-Object-Meta-Foo': 'Bar',
'X-Timestamp': ts_2.internal,
}
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
df.write_metadata(meta_1_metadata)
# sanity check: should have 2 * .data, possibly .durable, .meta
self.assertEqual(4 if legacy_durable else 3,
len(os.listdir(df._datadir)))
# sanity: should get frag index 3
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
expected = dict(frag_3_metadata)
expected.update(meta_1_metadata)
self.assertEqual(expected, df.read_metadata())
# add a newer datafile for frag index 2
# N.B. don't make it durable - skip call to commit()
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
frag_2_metadata = write_diskfile(df, ts_3, frag_index=2, commit=False,
data=b'new test data',
legacy_durable=legacy_durable)
# sanity check: should have 2* .data, possibly .durable, .meta, .data
self.assertEqual(5 if legacy_durable else 4,
len(os.listdir(df._datadir)))
# sanity check: with no frag preferences we get old metadata
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy)
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_2.internal, df.timestamp)
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# with empty frag preferences we get metadata from newer non-durable
# data file
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_prefs=[])
self.assertEqual(frag_2_metadata, df.read_metadata())
self.assertEqual(ts_3.internal, df.timestamp)
self.assertEqual(ts_3.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# check we didn't destroy any potentially valid data by opening the
# non-durable data file
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy)
self.assertEqual(expected, df.read_metadata())
# now add some newer .meta stuff which should replace older .meta
meta_2_metadata = {
'X-Object-Meta-Foo': 'BarBarBarAnne',
'X-Timestamp': ts_4.internal,
}
df = df_mgr.get_diskfile(self.existing_device, '0',
'a', 'c', 'o', policy=policy)
df.write_metadata(meta_2_metadata)
# sanity check: should have 2 * .data, possibly .durable, .data, .meta
self.assertEqual(5 if legacy_durable else 4,
len(os.listdir(df._datadir)))
# sanity check: with no frag preferences we get newer metadata applied
# to durable data file
expected = dict(frag_3_metadata)
expected.update(meta_2_metadata)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy)
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_4.internal, df.timestamp)
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# with empty frag preferences we still get metadata from newer .meta
# but applied to non-durable data file
expected = dict(frag_2_metadata)
expected.update(meta_2_metadata)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_prefs=[])
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_4.internal, df.timestamp)
self.assertEqual(ts_3.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# check we didn't destroy any potentially valid data by opening the
# non-durable data file
expected = dict(frag_3_metadata)
expected.update(meta_2_metadata)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy)
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_4.internal, df.timestamp)
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# prefer frags at ts_1, exclude no indexes, expect highest frag index
prefs = [{'timestamp': ts_1.internal, 'exclude': []},
{'timestamp': ts_2.internal, 'exclude': []},
{'timestamp': ts_3.internal, 'exclude': []}]
expected = dict(frag_3_metadata)
expected.update(meta_2_metadata)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_prefs=prefs)
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_4.internal, df.timestamp)
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# prefer frags at ts_1, exclude frag index 3 so expect frag index 0
prefs = [{'timestamp': ts_1.internal, 'exclude': [3]},
{'timestamp': ts_2.internal, 'exclude': []},
{'timestamp': ts_3.internal, 'exclude': []}]
expected = dict(frag_0_metadata)
expected.update(meta_2_metadata)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_prefs=prefs)
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_4.internal, df.timestamp)
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# now make ts_3 the preferred timestamp, excluded indexes don't exist
prefs = [{'timestamp': ts_3.internal, 'exclude': [4, 5, 6]},
{'timestamp': ts_2.internal, 'exclude': []},
{'timestamp': ts_1.internal, 'exclude': []}]
expected = dict(frag_2_metadata)
expected.update(meta_2_metadata)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_prefs=prefs)
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_4.internal, df.timestamp)
self.assertEqual(ts_3.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
# now make ts_2 the preferred timestamp - there are no frags at ts_2,
# next preference is ts_3 but index 2 is excluded, then at ts_1 index 3
# is excluded so we get frag 0 at ts_1
prefs = [{'timestamp': ts_2.internal, 'exclude': [1]},
{'timestamp': ts_3.internal, 'exclude': [2]},
{'timestamp': ts_1.internal, 'exclude': [3]}]
expected = dict(frag_0_metadata)
expected.update(meta_2_metadata)
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_prefs=prefs)
self.assertEqual(expected, df.read_metadata())
self.assertEqual(ts_4.internal, df.timestamp)
self.assertEqual(ts_1.internal, df.data_timestamp)
self.assertEqual(ts_1.internal, df.durable_timestamp)
self.assertEqual({ts_1: [0, 3], ts_3: [2]}, df.fragments)
def test_open_with_fragment_preferences_legacy_durable(self):
self._test_open_with_fragment_preferences(legacy_durable=True)
def test_open_with_fragment_preferences(self):
self._test_open_with_fragment_preferences(legacy_durable=False)
def test_open_with_bad_fragment_preferences(self):
policy = POLICIES.default
df_mgr = self.df_router[policy]
for bad in (
'ouch',
2,
[{'timestamp': '1234.5678', 'excludes': [1]}, {}],
[{'timestamp': 'not a timestamp', 'excludes': [1, 2]}],
[{'timestamp': '1234.5678', 'excludes': [1, -1]}],
[{'timestamp': '1234.5678', 'excludes': 1}],
[{'timestamp': '1234.5678'}],
[{'excludes': [1, 2]}]
):
try:
df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_prefs=bad)
self.fail('Expected DiskFileError for bad frag_prefs: %r'
% bad)
except DiskFileError as e:
self.assertIn('frag_prefs', str(e))
def test_disk_file_app_iter_ranges_checks_only_aligned_frag_data(self):
policy = POLICIES.default
frag_size = policy.fragment_size
# make sure there are two fragment size worth of data on disk
data = b'ab' * policy.ec_segment_size
df, df_data = self._create_test_file(data)
quarantine_msgs = []
reader = df.reader(_quarantine_hook=quarantine_msgs.append)
# each range uses a fresh reader app_iter_range which triggers a disk
# read at the range offset - make sure each of those disk reads will
# fetch an amount of data from disk that is greater than but not equal
# to a fragment size
reader._disk_chunk_size = int(frag_size * 1.5)
with mock.patch.object(
reader._diskfile.policy.pyeclib_driver, 'get_metadata')\
as mock_get_metadata:
it = reader.app_iter_ranges(
[(0, 10), (10, 20),
(frag_size + 20, frag_size + 30)],
'plain/text', '\r\n--someheader\r\n', len(df_data))
value = b''.join(it)
# check that only first range which starts at 0 triggers a frag check
self.assertEqual(1, mock_get_metadata.call_count)
self.assertIn(df_data[:10], value)
self.assertIn(df_data[10:20], value)
self.assertIn(df_data[frag_size + 20:frag_size + 30], value)
self.assertEqual(quarantine_msgs, [])
def test_reader_quarantines_corrupted_ec_archive(self):
        # This has the same purpose as
        # TestAuditor.test_object_audit_checks_EC_fragments, just making
        # sure that the checks happen in the DiskFileReader layer.
policy = POLICIES.default
df, df_data = self._create_test_file(b'x' * policy.ec_segment_size,
timestamp=self.ts())
def do_test(corrupted_frag_body, expected_offset, expected_read):
# expected_offset is offset at which corruption should be reported
# expected_read is number of bytes that should be read before the
# exception is raised
ts = self.ts()
write_diskfile(df, ts, corrupted_frag_body)
            # no error occurs when the diskfile is opened; reading the first
            # corrupt frag is sufficient to detect the corruption
df.open()
with self.assertRaises(DiskFileQuarantined) as cm:
reader = df.reader()
reader._disk_chunk_size = int(policy.fragment_size)
bytes_read = 0
for chunk in reader:
bytes_read += len(chunk)
with self.assertRaises(DiskFileNotExist):
df.open()
self.assertEqual(expected_read, bytes_read)
self.assertEqual('Invalid EC metadata at offset 0x%x' %
expected_offset, cm.exception.args[0])
# TODO with liberasurecode < 1.2.0 the EC metadata verification checks
# only the magic number at offset 59 bytes into the frag so we'll
# corrupt up to and including that. Once liberasurecode >= 1.2.0 is
# required we should be able to reduce the corruption length.
corruption_length = 64
# corrupted first frag can be detected
corrupted_frag_body = (b' ' * corruption_length +
df_data[corruption_length:])
do_test(corrupted_frag_body, 0, 0)
        # corruption of the second frag can also be detected
corrupted_frag_body = (df_data + b' ' * corruption_length +
df_data[corruption_length:])
do_test(corrupted_frag_body, len(df_data), len(df_data))
# if the second frag is shorter than frag size then corruption is
# detected when the reader is closed
corrupted_frag_body = (df_data + b' ' * corruption_length +
df_data[corruption_length:-10])
do_test(corrupted_frag_body, len(df_data), len(corrupted_frag_body))
def test_reader_ec_exception_causes_quarantine(self):
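        # certain pyeclib exceptions raised while checking frag metadata
        # should cause the object to be quarantined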
policy = POLICIES.default
def do_test(exception):
df, df_data = self._create_test_file(b'x' * policy.ec_segment_size,
timestamp=self.ts())
df.manager.logger.clear()
with mock.patch.object(df.policy.pyeclib_driver, 'get_metadata',
side_effect=exception):
df.open()
with self.assertRaises(DiskFileQuarantined) as cm:
for chunk in df.reader():
pass
with self.assertRaises(DiskFileNotExist):
df.open()
self.assertEqual('Invalid EC metadata at offset 0x0',
cm.exception.args[0])
log_lines = df.manager.logger.get_lines_for_level('warning')
self.assertIn('Quarantined object', log_lines[0])
self.assertIn('Invalid EC metadata at offset 0x0', log_lines[0])
do_test(pyeclib.ec_iface.ECInvalidFragmentMetadata('testing'))
do_test(pyeclib.ec_iface.ECBadFragmentChecksum('testing'))
do_test(pyeclib.ec_iface.ECInvalidParameter('testing'))
def test_reader_ec_exception_does_not_cause_quarantine(self):
# ECDriverError should not cause quarantine, only certain subclasses
policy = POLICIES.default
df, df_data = self._create_test_file(b'x' * policy.ec_segment_size,
timestamp=self.ts())
with mock.patch.object(
df.policy.pyeclib_driver, 'get_metadata',
side_effect=pyeclib.ec_iface.ECDriverError('testing')):
df.open()
read_data = b''.join([d for d in df.reader()])
self.assertEqual(df_data, read_data)
log_lines = df.manager.logger.get_lines_for_level('warning')
self.assertIn('Problem checking EC fragment', log_lines[0])
df.open() # not quarantined
def test_reader_frag_check_does_not_quarantine_if_its_not_binary(self):
        # This may look weird but, for super-safety, check that
        # ECDiskFileReader._frag_check doesn't quarantine when a non-binary
        # type chunk comes in (that would occur only from a coding bug)
policy = POLICIES.default
df, df_data = self._create_test_file(b'x' * policy.ec_segment_size,
timestamp=self.ts())
df.open()
for invalid_type_chunk in (None, [], [[]], 1):
reader = df.reader()
reader._check_frag(invalid_type_chunk)
# None and [] are just skipped and [[]] and 1 are detected as invalid
# chunks
log_lines = df.manager.logger.get_lines_for_level('warning')
self.assertEqual(2, len(log_lines))
for log_line in log_lines:
self.assertIn(
'Unexpected fragment data type (not quarantined)', log_line)
df.open() # not quarantined
def test_ondisk_data_info_has_durable_key(self):
# non-durable; use frag_prefs=[] to allow it to be opened
df = self._simple_get_diskfile(obj='o1', frag_prefs=[])
self._create_ondisk_file(df, b'', ext='.data', timestamp=10,
metadata={'name': '/a/c/o1'}, commit=False)
with df.open():
self.assertIn('durable', df._ondisk_info['data_info'])
self.assertFalse(df._ondisk_info['data_info']['durable'])
# durable
df = self._simple_get_diskfile(obj='o2')
self._create_ondisk_file(df, b'', ext='.data', timestamp=10,
metadata={'name': '/a/c/o2'})
with df.open():
self.assertIn('durable', df._ondisk_info['data_info'])
self.assertTrue(df._ondisk_info['data_info']['durable'])
# legacy durable
df = self._simple_get_diskfile(obj='o3')
self._create_ondisk_file(df, b'', ext='.data', timestamp=10,
metadata={'name': '/a/c/o3'},
legacy_durable=True)
with df.open():
data_info = df._ondisk_info['data_info']
# sanity check it is legacy with no #d part in filename
self.assertEqual(data_info['filename'], '0000000010.00000#2.data')
self.assertIn('durable', data_info)
self.assertTrue(data_info['durable'])
@patch_policies(with_ec_default=True)
class TestSuffixHashes(unittest.TestCase):
"""
    This tests all things related to hashing suffixes and therefore
    there are also a few test methods for cleanup_ondisk_files
    (because it's used by hash_suffix).
The public interface to suffix hashing is on the Manager::
* cleanup_ondisk_files(hsh_path)
* get_hashes(device, partition, suffixes, policy)
* invalidate_hash(suffix_dir)
The Manager.get_hashes method (used by the REPLICATE verb)
calls Manager._get_hashes (which may be an alias to the module
method get_hashes), which calls hash_suffix, which calls
cleanup_ondisk_files.
Outside of that, cleanup_ondisk_files and invalidate_hash are
used mostly after writing new files via PUT or DELETE.
Test methods are organized by::
* cleanup_ondisk_files tests - behaviors
* cleanup_ondisk_files tests - error handling
* invalidate_hash tests - behavior
* invalidate_hash tests - error handling
* get_hashes tests - hash_suffix behaviors
* get_hashes tests - hash_suffix error handling
* get_hashes tests - behaviors
* get_hashes tests - error handling
"""
def setUp(self):
skip_if_no_xattrs()
self.testdir = tempfile.mkdtemp()
self.logger = debug_logger('suffix-hash-test')
self.devices = os.path.join(self.testdir, 'node')
os.mkdir(self.devices)
self.existing_device = 'sda1'
os.mkdir(os.path.join(self.devices, self.existing_device))
self.conf = {
'swift_dir': self.testdir,
'devices': self.devices,
'mount_check': False,
}
self.df_router = diskfile.DiskFileRouter(self.conf, self.logger)
self._ts_iter = (Timestamp(t) for t in
itertools.count(int(time())))
self.policy = None
def ts(self):
"""
Timestamps - forever.
"""
return next(self._ts_iter)
def fname_to_ts_hash(self, fname):
"""
EC datafiles are only hashed by their timestamp
"""
return md5(fname.split('#', 1)[0]).hexdigest()
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def iter_policies(self):
for policy in POLICIES:
self.policy = policy
yield policy
@contextmanager
def policy_in_message(self):
try:
yield
except AssertionError as err:
if not self.policy:
raise
policy_trailer = '\n\n... for policy %r' % self.policy
raise AssertionError(str(err) + policy_trailer)
def assertEqual(self, *args):
with self.policy_in_message():
unittest.TestCase.assertEqual(self, *args)
def get_different_suffix_df(self, df, **kwargs):
# returns diskfile in the same partition with different suffix
suffix_dir = os.path.dirname(df._datadir)
for i in itertools.count():
df2 = df._manager.get_diskfile(
os.path.basename(df._device_path),
df._datadir.split('/')[-3],
df._account,
df._container,
'o%d' % i,
policy=df.policy,
**kwargs)
suffix_dir2 = os.path.dirname(df2._datadir)
if suffix_dir != suffix_dir2:
return df2
def test_valid_suffix(self):
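        # a valid suffix is a three character string of lower-case hex digits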
self.assertTrue(diskfile.valid_suffix(u'000'))
self.assertTrue(diskfile.valid_suffix('000'))
self.assertTrue(diskfile.valid_suffix('123'))
self.assertTrue(diskfile.valid_suffix('fff'))
self.assertFalse(diskfile.valid_suffix(list('123')))
self.assertFalse(diskfile.valid_suffix(123))
self.assertFalse(diskfile.valid_suffix(' 12'))
self.assertFalse(diskfile.valid_suffix('-00'))
self.assertFalse(diskfile.valid_suffix(u'-00'))
self.assertFalse(diskfile.valid_suffix('1234'))
def check_cleanup_ondisk_files(self, policy, input_files, output_files):
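        # Helper: run cleanup_ondisk_files over input_files with os.listdir,
        # os.unlink and os.rmdir mocked out, then assert that the surviving
        # file names equal output_files (or, if output_files is an Exception,
        # that it is raised).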
orig_unlink = os.unlink
file_list = list(input_files)
rmdirs = []
def mock_listdir(path):
return list(file_list)
def mock_unlink(path):
# timestamp 1 is a special tag to pretend a file disappeared
# between the listdir and unlink.
if '/0000000001.00000.' in path:
# Using actual os.unlink for a non-existent name to reproduce
# exactly what OSError it raises in order to prove that
# common.utils.remove_file is squelching the error - but any
# OSError would do.
orig_unlink(uuid.uuid4().hex)
file_list.remove(os.path.basename(path))
df_mgr = self.df_router[policy]
with unit_mock({'os.listdir': mock_listdir, 'os.unlink': mock_unlink,
'os.rmdir': rmdirs.append}):
if isinstance(output_files, Exception):
path = os.path.join(self.testdir, 'does-not-matter')
self.assertRaises(output_files.__class__,
df_mgr.cleanup_ondisk_files, path)
return
df_mgr.commit_window = 0
files = df_mgr.cleanup_ondisk_files('/whatever')['files']
self.assertEqual(files, output_files)
if files:
self.assertEqual(rmdirs, [])
else:
self.assertEqual(rmdirs, ['/whatever'])
# cleanup_ondisk_files tests - behaviors
def test_cleanup_ondisk_files_purge_data_newer_ts(self):
for policy in self.iter_policies():
# purge .data if there's a newer .ts
file1 = _make_datafilename(self.ts(), policy)
file2 = self.ts().internal + '.ts'
file_list = [file1, file2]
self.check_cleanup_ondisk_files(policy, file_list, [file2])
def test_cleanup_ondisk_files_purge_expired_ts(self):
for policy in self.iter_policies():
# purge older .ts files if there's a newer .data
file1 = self.ts().internal + '.ts'
file2 = self.ts().internal + '.ts'
timestamp = self.ts()
file3 = _make_datafilename(timestamp, policy, durable=False)
file_list = [file1, file2, file3]
expected = {
# no durable datafile means you can't get rid of the
# latest tombstone even if datafile is newer
EC_POLICY: [file3, file2],
REPL_POLICY: [file3],
}[policy.policy_type]
self.check_cleanup_ondisk_files(policy, file_list, expected)
def _do_test_cleanup_ondisk_files_purge_ts_newer_data(
self, policy, legacy_durable=False):
# purge .ts if there's a newer .data
file1 = self.ts().internal + '.ts'
timestamp = self.ts()
file2 = _make_datafilename(
timestamp, policy, durable=not legacy_durable)
file_list = [file1, file2]
expected = [file2]
if policy.policy_type == EC_POLICY and legacy_durable:
durable_file = timestamp.internal + '.durable'
file_list.append(durable_file)
expected.insert(0, durable_file)
self.check_cleanup_ondisk_files(policy, file_list, expected)
def test_cleanup_ondisk_files_purge_ts_newer_data(self):
for policy in self.iter_policies():
self._do_test_cleanup_ondisk_files_purge_ts_newer_data(policy)
def test_cleanup_ondisk_files_purge_ts_newer_data_and_legacy_durable(self):
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
self._do_test_cleanup_ondisk_files_purge_ts_newer_data(
policy, legacy_durable=True)
def test_cleanup_ondisk_files_purge_older_ts(self):
for policy in self.iter_policies():
file1 = self.ts().internal + '.ts'
file2 = self.ts().internal + '.ts'
file3 = _make_datafilename(self.ts(), policy, durable=False)
file4 = self.ts().internal + '.meta'
expected = {
# no durable means we can only throw out things before
# the latest tombstone
EC_POLICY: [file4, file3, file2],
# keep .meta and .data and purge all .ts files
REPL_POLICY: [file4, file3],
}[policy.policy_type]
file_list = [file1, file2, file3, file4]
self.check_cleanup_ondisk_files(policy, file_list, expected)
def _do_test_cleanup_ondisk_files_keep_meta_data_purge_ts(
self, policy, legacy_durable=False):
file1 = self.ts().internal + '.ts'
file2 = self.ts().internal + '.ts'
timestamp = self.ts()
file3 = _make_datafilename(
timestamp, policy, durable=not legacy_durable)
file_list = [file1, file2, file3]
expected = [file3]
if policy.policy_type == EC_POLICY and legacy_durable:
durable_filename = timestamp.internal + '.durable'
file_list.append(durable_filename)
expected.insert(0, durable_filename)
file4 = self.ts().internal + '.meta'
file_list.append(file4)
expected.insert(0, file4)
# keep .meta and .data if meta newer than data and purge .ts
self.check_cleanup_ondisk_files(policy, file_list, expected)
def test_cleanup_ondisk_files_keep_meta_data_purge_ts(self):
for policy in self.iter_policies():
self._do_test_cleanup_ondisk_files_keep_meta_data_purge_ts(policy)
def test_cleanup_ondisk_files_keep_meta_data_purge_ts_legacy_durable(self):
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
self._do_test_cleanup_ondisk_files_keep_meta_data_purge_ts(
policy, legacy_durable=True)
def test_cleanup_ondisk_files_keep_one_ts(self):
for policy in self.iter_policies():
file1, file2, file3 = [self.ts().internal + '.ts'
for i in range(3)]
file_list = [file1, file2, file3]
# keep only latest of multiple .ts files
self.check_cleanup_ondisk_files(policy, file_list, [file3])
def test_cleanup_ondisk_files_multi_data_file(self):
for policy in self.iter_policies():
file1 = _make_datafilename(self.ts(), policy, 1, durable=False)
file2 = _make_datafilename(self.ts(), policy, 2, durable=False)
file3 = _make_datafilename(self.ts(), policy, 3, durable=False)
expected = {
# keep all non-durable datafiles
EC_POLICY: [file3, file2, file1],
# keep only latest of multiple .data files
REPL_POLICY: [file3]
}[policy.policy_type]
file_list = [file1, file2, file3]
self.check_cleanup_ondisk_files(policy, file_list, expected)
def _do_test_cleanup_ondisk_files_keeps_one_datafile(self, policy,
legacy_durable=False):
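        # regardless of how many .data files exist, only the newest one should
        # survive cleanup (in the legacy case the .durable files also remain)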
timestamps = [self.ts() for i in range(3)]
file1 = _make_datafilename(timestamps[0], policy, 1,
durable=not legacy_durable)
file2 = _make_datafilename(timestamps[1], policy, 2,
durable=not legacy_durable)
file3 = _make_datafilename(timestamps[2], policy, 3,
durable=not legacy_durable)
file_list = [file1, file2, file3]
expected = [file3]
if policy.policy_type == EC_POLICY and legacy_durable:
for t in timestamps:
file_list.append(t.internal + '.durable')
expected.insert(0, file_list[-1])
self.check_cleanup_ondisk_files(policy, file_list, expected)
def test_cleanup_ondisk_files_keeps_one_datafile(self):
for policy in self.iter_policies():
self._do_test_cleanup_ondisk_files_keeps_one_datafile(policy)
def test_cleanup_ondisk_files_keeps_one_datafile_and_legacy_durable(self):
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
self._do_test_cleanup_ondisk_files_keeps_one_datafile(
policy, legacy_durable=True)
def _do_test_cleanup_ondisk_files_keep_one_meta(self, policy,
legacy_durable=False):
# keep only latest of multiple .meta files
t_data = self.ts()
file1 = _make_datafilename(t_data, policy, durable=not legacy_durable)
file2, file3 = [self.ts().internal + '.meta' for i in range(2)]
file_list = [file1, file2, file3]
expected = [file3, file1]
if policy.policy_type == EC_POLICY and legacy_durable:
durable_file = t_data.internal + '.durable'
file_list.append(durable_file)
expected.insert(1, durable_file)
self.check_cleanup_ondisk_files(policy, file_list, expected)
def test_cleanup_ondisk_files_keep_one_meta(self):
for policy in self.iter_policies():
self._do_test_cleanup_ondisk_files_keep_one_meta(policy)
def test_cleanup_ondisk_files_keep_one_meta_legacy_durable(self):
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
self._do_test_cleanup_ondisk_files_keep_one_meta(
policy, legacy_durable=True)
def test_cleanup_ondisk_files_only_meta(self):
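        # with only .meta files on disk, just the newest .meta is kept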
for policy in self.iter_policies():
file1, file2 = [self.ts().internal + '.meta' for i in range(2)]
file_list = [file1, file2]
self.check_cleanup_ondisk_files(policy, file_list, [file2])
def test_cleanup_ondisk_files_ignore_orphaned_ts(self):
for policy in self.iter_policies():
            # A more recent orphaned .meta file would otherwise prevent old
            # .ts files from being cleaned up
file1, file2 = [self.ts().internal + '.ts' for i in range(2)]
file3 = self.ts().internal + '.meta'
file_list = [file1, file2, file3]
self.check_cleanup_ondisk_files(policy, file_list, [file3, file2])
def test_cleanup_ondisk_files_purge_old_data_only(self):
for policy in self.iter_policies():
            # Oldest .data will be purged, .meta and .ts won't be touched
file1 = _make_datafilename(self.ts(), policy)
file2 = self.ts().internal + '.ts'
file3 = self.ts().internal + '.meta'
file_list = [file1, file2, file3]
self.check_cleanup_ondisk_files(policy, file_list, [file3, file2])
def test_cleanup_ondisk_files_purge_old_ts(self):
for policy in self.iter_policies():
# A single old .ts file will be removed
old_float = time() - (diskfile.DEFAULT_RECLAIM_AGE + 1)
file1 = Timestamp(old_float).internal + '.ts'
file_list = [file1]
self.check_cleanup_ondisk_files(policy, file_list, [])
def test_cleanup_ondisk_files_keep_isolated_meta_purge_old_ts(self):
for policy in self.iter_policies():
# A single old .ts file will be removed despite presence of a .meta
old_float = time() - (diskfile.DEFAULT_RECLAIM_AGE + 1)
file1 = Timestamp(old_float).internal + '.ts'
file2 = Timestamp(time() + 2).internal + '.meta'
file_list = [file1, file2]
self.check_cleanup_ondisk_files(policy, file_list, [file2])
def test_cleanup_ondisk_files_keep_single_old_data(self):
for policy in self.iter_policies():
old_float = time() - (diskfile.DEFAULT_RECLAIM_AGE + 1)
file1 = _make_datafilename(
Timestamp(old_float), policy, durable=True)
file_list = [file1]
self.check_cleanup_ondisk_files(policy, file_list, file_list)
def test_cleanup_ondisk_drops_old_non_durable_data(self):
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
old_float = time() - (diskfile.DEFAULT_RECLAIM_AGE + 1)
file1 = _make_datafilename(
Timestamp(old_float), policy, durable=False)
file_list = [file1]
# for EC an isolated old non-durable .data file is removed
expected = []
self.check_cleanup_ondisk_files(policy, file_list, expected)
def test_cleanup_ondisk_files_drops_isolated_durable(self):
# check behaviour for legacy durable files
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
file1 = Timestamp.now().internal + '.durable'
file_list = [file1]
self.check_cleanup_ondisk_files(policy, file_list, [])
def test_cleanup_ondisk_files_purges_single_old_meta(self):
for policy in self.iter_policies():
# A single old .meta file will be removed
old_float = time() - (diskfile.DEFAULT_RECLAIM_AGE + 1)
file1 = Timestamp(old_float).internal + '.meta'
file_list = [file1]
self.check_cleanup_ondisk_files(policy, file_list, [])
# cleanup_ondisk_files tests - error handling
def test_cleanup_ondisk_files_hsh_path_enoent(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# common.utils.listdir *completely* mutes ENOENT
path = os.path.join(self.testdir, 'does-not-exist')
self.assertEqual(df_mgr.cleanup_ondisk_files(path)['files'], [])
def test_cleanup_ondisk_files_hsh_path_other_oserror(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
with mock.patch('os.listdir') as mock_listdir:
mock_listdir.side_effect = OSError('kaboom!')
# but it will raise other OSErrors
path = os.path.join(self.testdir, 'does-not-matter')
self.assertRaises(OSError, df_mgr.cleanup_ondisk_files,
path)
def test_cleanup_ondisk_files_reclaim_tombstone_remove_file_error(self):
for policy in self.iter_policies():
# Timestamp 1 makes the check routine pretend the file
# disappeared after listdir before unlink.
file1 = '0000000001.00000.ts'
file_list = [file1]
self.check_cleanup_ondisk_files(policy, file_list, [])
def test_cleanup_ondisk_files_older_remove_file_error(self):
for policy in self.iter_policies():
# Timestamp 1 makes the check routine pretend the file
# disappeared after listdir before unlink.
file1 = _make_datafilename(Timestamp(1), policy)
file2 = '0000000002.00000.ts'
file_list = [file1, file2]
self.check_cleanup_ondisk_files(policy, file_list, [])
# invalidate_hash tests - behavior
def test_invalidate_hash_file_does_not_exist(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path, diskfile.HASH_FILE)
inv_file = os.path.join(
part_path, diskfile.HASH_INVALIDATIONS_FILE)
# sanity, new partition has no suffix hashing artifacts
self.assertFalse(os.path.exists(hashes_file))
self.assertFalse(os.path.exists(inv_file))
# invalidating a hash does not create the hashes_file
with mock.patch(
'swift.obj.diskfile.BaseDiskFileManager.invalidate_hash',
side_effect=diskfile.invalidate_hash) \
as mock_invalidate_hash:
df.delete(self.ts())
self.assertFalse(os.path.exists(hashes_file))
# ... but does invalidate the suffix
self.assertEqual([mock.call(suffix_dir)],
mock_invalidate_hash.call_args_list)
with open(inv_file) as f:
self.assertEqual(suffix, f.read().strip('\n'))
# ... and hashing suffixes finds (and hashes) the new suffix
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes)
self.assertTrue(os.path.exists(hashes_file))
self.assertIn(os.path.basename(suffix_dir), hashes)
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
found_hashes.pop('updated')
self.assertTrue(found_hashes.pop('valid'))
self.assertEqual(hashes, found_hashes)
# ... and truncates the invalidations file
with open(inv_file) as f:
self.assertEqual('', f.read().strip('\n'))
def test_invalidate_hash_empty_file_exists(self):
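        # get_hashes on an empty partition still writes an (empty) hashes.pkl;
        # a subsequent delete creates a suffix that the next get_hashes call
        # picks up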
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
mkdirs(part_path)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
pkl_path = os.path.join(part_path, diskfile.HASH_FILE)
self.assertTrue(os.path.exists(pkl_path))
self.assertEqual(hashes, {})
# create something to hash
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
df.delete(self.ts())
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes) # sanity
def test_invalidate_hash_file_not_truncated_when_empty(self):
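        # if hashes.invalid is already empty, a get_hashes call should only
        # read hashes.pkl and hashes.invalid - no write (truncate) is needed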
orig_open = open
def watch_open(*args, **kargs):
name = os.path.basename(args[0])
open_log[name].append(args[1])
return orig_open(*args, **kargs)
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
mkdirs(part_path)
inv_file = os.path.join(
part_path, diskfile.HASH_INVALIDATIONS_FILE)
hash_file = os.path.join(
part_path, diskfile.HASH_FILE)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertEqual(hashes, {})
self.assertTrue(os.path.exists(hash_file))
# create something to hash
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
df.delete(self.ts())
self.assertTrue(os.path.exists(inv_file))
            # invalidation file created, let's consolidate it
df_mgr.get_hashes('sda1', '0', [], policy)
open_log = defaultdict(list)
open_loc = '__builtin__.open' if six.PY2 else 'builtins.open'
with mock.patch(open_loc, watch_open):
self.assertTrue(os.path.exists(inv_file))
# no new suffixes get invalidated... so no write iop
df_mgr.get_hashes('sda1', '0', [], policy)
# each file is opened once to read
expected = {
'hashes.pkl': ['rb'],
'hashes.invalid': ['r'],
}
self.assertEqual(open_log, expected)
def _test_invalidate_hash_racing_get_hashes_diff_suffix(self, existing):
        # a suffix can be changed or created by a second process while the new
        # pkl is being calculated - verify that the suffix is correct after the
        # next get_hashes call
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
if existing:
mkdirs(part_path)
# force hashes.pkl to exist
df_mgr.get_hashes('sda1', '0', [], policy)
self.assertTrue(os.path.exists(os.path.join(
part_path, diskfile.HASH_FILE)))
orig_listdir = os.listdir
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
suffix = os.path.basename(os.path.dirname(df._datadir))
df2 = self.get_different_suffix_df(df)
suffix2 = os.path.basename(os.path.dirname(df2._datadir))
non_local = {'df2touched': False}
df.delete(self.ts())
def mock_listdir(*args, **kwargs):
# simulating an invalidation occurring in another process while
# get_hashes is executing
result = orig_listdir(*args, **kwargs)
if not non_local['df2touched']:
non_local['df2touched'] = True
# other process creates new suffix
df2.delete(self.ts())
return result
if not existing:
self.assertFalse(os.path.exists(os.path.join(
part_path, diskfile.HASH_FILE)))
with mock.patch('swift.obj.diskfile.os.listdir',
mock_listdir):
# creates pkl file if not already there
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertTrue(os.path.exists(os.path.join(
part_path, diskfile.HASH_FILE)))
            # second suffix was added after the directory listing; it gets
            # hashed later
self.assertIn(suffix, hashes)
self.assertNotIn(suffix2, hashes)
# updates pkl file
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes)
self.assertIn(suffix2, hashes)
def test_invalidate_hash_racing_get_hashes_diff_suffix_new_part(self):
self._test_invalidate_hash_racing_get_hashes_diff_suffix(False)
def test_invalidate_hash_racing_get_hashes_diff_suffix_existing_part(self):
self._test_invalidate_hash_racing_get_hashes_diff_suffix(True)
def _check_hash_invalidations_race_get_hashes_same_suffix(self, existing):
        # verify that when two processes concurrently call get_hashes, any
        # concurrent hash invalidation will survive and be consolidated on a
        # subsequent call to get_hashes (i.e. ensure the first get_hashes
        # process does not ignore the concurrent hash invalidation that a
        # second get_hashes might have consolidated to hashes.pkl)
non_local = {}
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
orig_hash_suffix = df_mgr._hash_suffix
if existing:
# create hashes.pkl
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
mkdirs(part_path)
df_mgr.get_hashes('sda1', '0', [], policy)
self.assertTrue(os.path.exists(os.path.join(
part_path, diskfile.HASH_FILE)))
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
part_dir = os.path.dirname(suffix_dir)
invalidations_file = os.path.join(
part_dir, diskfile.HASH_INVALIDATIONS_FILE)
non_local['hash'] = None
non_local['called'] = False
# delete will append suffix to hashes.invalid
df.delete(self.ts())
with open(invalidations_file) as f:
self.assertEqual(suffix, f.read().strip('\n')) # sanity
hash1 = df_mgr._hash_suffix(suffix_dir)
def mock_hash_suffix(*args, **kwargs):
# after first get_hashes has called _hash_suffix, simulate a
# second process invalidating the same suffix, followed by a
# third process calling get_hashes and failing (or yielding)
# after consolidate_hashes has completed
result = orig_hash_suffix(*args, **kwargs)
if not non_local['called']:
non_local['called'] = True
# appends suffix to hashes.invalid
df.delete(self.ts())
                    # simulate another process calling get_hashes but failing
                    # after hash invalidations have been consolidated
hashes = df_mgr.consolidate_hashes(part_dir)
if existing:
self.assertTrue(hashes['valid'])
else:
self.assertFalse(hashes['valid'])
# get the updated suffix hash...
non_local['hash'] = orig_hash_suffix(suffix_dir)
return result
with mock.patch.object(df_mgr, '_hash_suffix', mock_hash_suffix):
# repeats listing when pkl modified
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
# first get_hashes should complete with suffix1 state
self.assertIn(suffix, hashes)
# sanity check - the suffix hash has changed...
self.assertNotEqual(hash1, non_local['hash'])
# the invalidation file has been truncated...
with open(invalidations_file, 'r') as f:
self.assertEqual('', f.read())
# so hashes should have the latest suffix hash...
self.assertEqual(hashes[suffix], non_local['hash'])
non_local['called'] = False
with mock.patch.object(df_mgr, '_hash_suffix', mock_hash_suffix):
df_mgr.get_hashes('sda1', '0', [suffix], policy,
skip_rehash=True)
self.assertFalse(non_local['called'])
with open(invalidations_file) as f:
self.assertEqual(suffix, f.read().strip('\n')) # sanity
def test_hash_invalidations_race_get_hashes_same_suffix_new(self):
self._check_hash_invalidations_race_get_hashes_same_suffix(False)
def test_hash_invalidations_race_get_hashes_same_suffix_existing(self):
self._check_hash_invalidations_race_get_hashes_same_suffix(True)
def _check_unpickle_error_and_get_hashes_failure(self, existing):
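        # a corrupt hashes.pkl plus a failed get_hashes attempt must not stop
        # a later get_hashes call from rebuilding valid hashes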
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
suffix = os.path.basename(os.path.dirname(df._datadir))
# avoid getting O_TMPFILE warning in logs
if not utils.o_tmpfile_in_tmpdir_supported():
df.manager.use_linkat = False
if existing:
df.delete(self.ts())
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
df.delete(self.ts())
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path, diskfile.HASH_FILE)
# write a corrupt hashes.pkl
open(hashes_file, 'w')
# simulate first call to get_hashes failing after attempting to
# consolidate hashes
with mock.patch('swift.obj.diskfile.os.listdir',
side_effect=Exception()):
self.assertRaises(
Exception, df_mgr.get_hashes, 'sda1', '0', [], policy)
# sanity on-disk state is invalid
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
found_hashes.pop('updated')
self.assertEqual(False, found_hashes.pop('valid'))
# verify subsequent call to get_hashes reaches correct outcome
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes)
self.assertEqual([], df_mgr.logger.get_lines_for_level('warning'))
def test_unpickle_error_and_get_hashes_failure_new_part(self):
self._check_unpickle_error_and_get_hashes_failure(False)
def test_unpickle_error_and_get_hashes_failure_existing_part(self):
self._check_unpickle_error_and_get_hashes_failure(True)
def test_invalidate_hash_consolidation(self):
def assert_consolidation(suffixes):
# verify that suffixes are invalidated after consolidation
with mock.patch('swift.obj.diskfile.lock_path') as mock_lock:
hashes = df_mgr.consolidate_hashes(part_path)
self.assertTrue(mock_lock.called)
for suffix in suffixes:
self.assertIn(suffix, hashes)
self.assertIsNone(hashes[suffix])
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
self.assertTrue(hashes['valid'])
self.assertEqual(hashes, found_hashes)
with open(invalidations_file, 'r') as f:
self.assertEqual("", f.read())
return hashes
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# create something to hash
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
df.delete(self.ts())
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
original_hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, original_hashes) # sanity
self.assertIsNotNone(original_hashes[suffix])
# sanity check hashes file
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path, diskfile.HASH_FILE)
invalidations_file = os.path.join(
part_path, diskfile.HASH_INVALIDATIONS_FILE)
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
found_hashes.pop('updated')
self.assertTrue(found_hashes.pop('valid'))
self.assertEqual(original_hashes, found_hashes)
# invalidate the hash
with mock.patch('swift.obj.diskfile.lock_path') as mock_lock:
df_mgr.invalidate_hash(suffix_dir)
self.assertTrue(mock_lock.called)
# suffix should be in invalidations file
with open(invalidations_file, 'r') as f:
self.assertEqual(suffix + "\n", f.read())
# hashes file is unchanged
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
found_hashes.pop('updated')
self.assertTrue(found_hashes.pop('valid'))
self.assertEqual(original_hashes, found_hashes)
# consolidate the hash and the invalidations
hashes = assert_consolidation([suffix])
# invalidate a different suffix hash in same partition but not in
# existing hashes.pkl
df2 = self.get_different_suffix_df(df)
df2.delete(self.ts())
suffix_dir2 = os.path.dirname(df2._datadir)
suffix2 = os.path.basename(suffix_dir2)
# suffix2 should be in invalidations file
with open(invalidations_file, 'r') as f:
self.assertEqual(suffix2 + "\n", f.read())
# hashes file is not yet changed
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
self.assertTrue(hashes['valid'])
self.assertEqual(hashes, found_hashes)
# consolidate hashes
hashes = assert_consolidation([suffix, suffix2])
# invalidating suffix2 multiple times is ok
df2.delete(self.ts())
df2.delete(self.ts())
# suffix2 should be in invalidations file
with open(invalidations_file, 'r') as f:
invalids = f.read().splitlines()
self.assertEqual(sorted((suffix2, suffix2)),
sorted(invalids)) # sanity
# hashes file is not yet changed
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
self.assertTrue(hashes['valid'])
self.assertEqual(hashes, found_hashes)
# consolidate hashes
assert_consolidation([suffix, suffix2])
def test_get_hashes_consolidates_suffix_rehash_once(self):
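        # each get_hashes call consolidates invalidations once and rehashes
        # only the suffixes invalidated since the previous call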
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
df.delete(self.ts())
suffix_dir = os.path.dirname(df._datadir)
with mock.patch.object(df_mgr, 'consolidate_hashes',
side_effect=df_mgr.consolidate_hashes
) as mock_consolidate_hashes, \
mock.patch.object(df_mgr, '_hash_suffix',
side_effect=df_mgr._hash_suffix
) as mock_hash_suffix:
# creates pkl file
df_mgr.get_hashes('sda1', '0', [], policy)
mock_consolidate_hashes.assert_called_once()
self.assertEqual([mock.call(suffix_dir, policy=policy)],
mock_hash_suffix.call_args_list)
# second object in path
df2 = self.get_different_suffix_df(df)
df2.delete(self.ts())
suffix_dir2 = os.path.dirname(df2._datadir)
mock_consolidate_hashes.reset_mock()
mock_hash_suffix.reset_mock()
# updates pkl file
df_mgr.get_hashes('sda1', '0', [], policy)
mock_consolidate_hashes.assert_called_once()
self.assertEqual([mock.call(suffix_dir2, policy=policy)],
mock_hash_suffix.call_args_list)
def test_consolidate_hashes_raises_exception(self):
# verify that if consolidate_hashes raises an exception then suffixes
# are rehashed and a hashes.pkl is written
for policy in self.iter_policies():
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path, diskfile.HASH_FILE)
invalidations_file = os.path.join(
part_path, diskfile.HASH_INVALIDATIONS_FILE)
self.logger.clear()
df_mgr = self.df_router[policy]
# create something to hash
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
# avoid getting O_TMPFILE warning in logs
if not utils.o_tmpfile_in_tmpdir_supported():
df.manager.use_linkat = False
self.assertFalse(os.path.exists(part_path))
df.delete(self.ts())
self.assertTrue(os.path.exists(invalidations_file))
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
# no pre-existing hashes.pkl
self.assertFalse(os.path.exists(hashes_file))
with mock.patch.object(df_mgr, '_hash_suffix',
return_value='fake hash'):
with mock.patch.object(df_mgr, 'consolidate_hashes',
side_effect=Exception()):
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertEqual({suffix: 'fake hash'}, hashes)
# sanity check hashes file
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
found_hashes.pop('updated')
self.assertTrue(found_hashes.pop('valid'))
self.assertEqual(hashes, found_hashes)
# sanity check log warning
warnings = self.logger.get_lines_for_level('warning')
self.assertEqual(warnings, ["Unable to read %r" % hashes_file])
# repeat with pre-existing hashes.pkl
self.logger.clear()
with mock.patch.object(df_mgr, '_hash_suffix',
return_value='new fake hash'):
with mock.patch.object(df_mgr, 'consolidate_hashes',
side_effect=Exception()):
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertEqual({suffix: 'new fake hash'}, hashes)
# sanity check hashes file
with open(hashes_file, 'rb') as f:
found_hashes = pickle.load(f)
found_hashes.pop('updated')
self.assertTrue(found_hashes.pop('valid'))
self.assertEqual(hashes, found_hashes)
# sanity check log warning
warnings = self.logger.get_lines_for_level('warning')
self.assertEqual(warnings, ["Unable to read %r" % hashes_file])
# invalidate_hash tests - error handling
def test_invalidate_hash_bad_pickle(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# make some valid data
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
df.delete(self.ts())
# sanity check hashes file
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path, diskfile.HASH_FILE)
self.assertFalse(os.path.exists(hashes_file))
# write some garbage in hashes file
with open(hashes_file, 'w') as f:
f.write('asdf')
            # invalidate_hash silently does *NOT* repair invalid data
df_mgr.invalidate_hash(suffix_dir)
with open(hashes_file) as f:
self.assertEqual(f.read(), 'asdf')
# ... but get_hashes will
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes)
# get_hashes tests - hash_suffix behaviors
def test_hash_suffix_one_tombstone(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(
'sda1', '0', 'a', 'c', 'o', policy=policy)
suffix = os.path.basename(os.path.dirname(df._datadir))
# write a tombstone
timestamp = self.ts()
df.delete(timestamp)
tombstone_hash = md5(timestamp.internal + '.ts').hexdigest()
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
expected = {
REPL_POLICY: {suffix: tombstone_hash},
EC_POLICY: {suffix: {
# fi is None here because we have a tombstone
None: tombstone_hash}},
}[policy.policy_type]
self.assertEqual(hashes, expected)
def test_hash_suffix_one_tombstone_and_one_meta(self):
# A tombstone plus a newer meta file can happen if a tombstone is
# replicated to a node with a newer meta file but older data file. The
# meta file will be ignored when the diskfile is opened so the
# effective state of the disk files is equivalent to only having the
# tombstone. Replication cannot remove the meta file, and the meta file
# cannot be ssync replicated to a node with only the tombstone, so
# we want the get_hashes result to be the same as if the meta file was
# not there.
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(
'sda1', '0', 'a', 'c', 'o', policy=policy)
suffix = os.path.basename(os.path.dirname(df._datadir))
# write a tombstone
timestamp = self.ts()
df.delete(timestamp)
# write a meta file
df.write_metadata({'X-Timestamp': self.ts().internal})
# sanity check
self.assertEqual(2, len(os.listdir(df._datadir)))
tombstone_hash = md5(timestamp.internal + '.ts').hexdigest()
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
expected = {
REPL_POLICY: {suffix: tombstone_hash},
EC_POLICY: {suffix: {
# fi is None here because we have a tombstone
None: tombstone_hash}},
}[policy.policy_type]
self.assertEqual(hashes, expected)
def test_hash_suffix_one_reclaim_tombstone_and_one_meta(self):
# An isolated meta file can happen if a tombstone is replicated to a
# node with a newer meta file but older data file, and the tombstone is
# subsequently reclaimed. The meta file will be ignored when the
# diskfile is opened so the effective state of the disk files is
# equivalent to having no files.
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
continue
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(
'sda1', '0', 'a', 'c', 'o', policy=policy)
suffix = os.path.basename(os.path.dirname(df._datadir))
now = time()
# write a tombstone that's just a *little* older than reclaim time
df.delete(Timestamp(now - 1001))
# write a meta file that's not quite so old
ts_meta = Timestamp(now - 501)
df.write_metadata({'X-Timestamp': ts_meta.internal})
# sanity check
self.assertEqual(2, len(os.listdir(df._datadir)))
# scale back the df manager's reclaim age a bit to make the
# tombstone reclaimable
df_mgr.reclaim_age = 1000
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
            # the tombstone is reclaimed, the meta file remains, the suffix
            # hash is not updated BUT the suffix dir cannot be deleted so
            # a suffix hash equal to the hash of the empty string is reported.
            # TODO: this is not the same result as if the meta file did not exist!
self.assertEqual([ts_meta.internal + '.meta'],
os.listdir(df._datadir))
self.assertEqual(hashes, {suffix: MD5_OF_EMPTY_STRING})
# scale back the df manager's reclaim age even more - call to
# get_hashes does not trigger reclaim because the suffix has
# MD5_OF_EMPTY_STRING in hashes.pkl
df_mgr.reclaim_age = 500
df_mgr.commit_window = 0
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertEqual([ts_meta.internal + '.meta'],
os.listdir(df._datadir))
self.assertEqual(hashes, {suffix: MD5_OF_EMPTY_STRING})
# call get_hashes with recalculate = [suffix] and the suffix dir
            # gets re-hashed so the .meta is finally reclaimed.
hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy)
self.assertFalse(os.path.exists(os.path.dirname(df._datadir)))
self.assertEqual(hashes, {})
def test_hash_suffix_one_reclaim_tombstone(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(
'sda1', '0', 'a', 'c', 'o', policy=policy)
            # scale back this test's manager's reclaim age a bit
df_mgr.reclaim_age = 1000
# write a tombstone that's just a *little* older
old_time = time() - 1001
timestamp = Timestamp(old_time)
df.delete(timestamp.internal)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertEqual(hashes, {})
def test_hash_suffix_ts_cleanup_after_recalc(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(
'sda1', '0', 'a', 'c', 'o', policy=policy)
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
# scale back reclaim age a bit
df_mgr.reclaim_age = 1000
# write a valid tombstone
old_time = time() - 500
timestamp = Timestamp(old_time)
df.delete(timestamp.internal)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes)
self.assertIsNotNone(hashes[suffix])
# we have tombstone entry
tombstone = '%s.ts' % timestamp.internal
self.assertTrue(os.path.exists(df._datadir))
self.assertIn(tombstone, os.listdir(df._datadir))
# lower reclaim age to force tombstone reclaiming
df_mgr.reclaim_age = 200
# not cleaning up because suffix not invalidated
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertTrue(os.path.exists(df._datadir))
self.assertIn(tombstone, os.listdir(df._datadir))
self.assertIn(suffix, hashes)
self.assertIsNotNone(hashes[suffix])
            # recalculating the suffix hash causes cleanup
hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy)
self.assertEqual(hashes, {})
self.assertFalse(os.path.exists(df._datadir))
def test_hash_suffix_ts_cleanup_after_invalidate_hash(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(
'sda1', '0', 'a', 'c', 'o', policy=policy)
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
# scale back reclaim age a bit
df_mgr.reclaim_age = 1000
# write a valid tombstone
old_time = time() - 500
timestamp = Timestamp(old_time)
df.delete(timestamp.internal)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes)
self.assertIsNotNone(hashes[suffix])
# we have tombstone entry
tombstone = '%s.ts' % timestamp.internal
self.assertTrue(os.path.exists(df._datadir))
self.assertIn(tombstone, os.listdir(df._datadir))
# lower reclaim age to force tombstone reclaiming
df_mgr.reclaim_age = 200
# not cleaning up because suffix not invalidated
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertTrue(os.path.exists(df._datadir))
self.assertIn(tombstone, os.listdir(df._datadir))
self.assertIn(suffix, hashes)
self.assertIsNotNone(hashes[suffix])
# However if we call invalidate_hash for the suffix dir,
# get_hashes can reclaim the tombstone
with mock.patch('swift.obj.diskfile.lock_path'):
df_mgr.invalidate_hash(suffix_dir)
            # updating invalidated hashes causes cleanup
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertEqual(hashes, {})
self.assertFalse(os.path.exists(df._datadir))
def test_hash_suffix_one_reclaim_and_one_valid_tombstone(self):
paths, suffix = find_paths_with_matching_suffixes(2, 1)
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
a, c, o = paths[suffix][0]
df1 = df_mgr.get_diskfile(
'sda1', '0', a, c, o, policy=policy)
            # scale back this test's manager's reclaim age a bit
df_mgr.reclaim_age = 1000
# write one tombstone that's just a *little* older
df1.delete(Timestamp(time() - 1001))
# create another tombstone in same suffix dir that's newer
a, c, o = paths[suffix][1]
df2 = df_mgr.get_diskfile(
'sda1', '0', a, c, o, policy=policy)
t_df2 = Timestamp(time() - 900)
df2.delete(t_df2)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
suffix = os.path.basename(os.path.dirname(df1._datadir))
df2_tombstone_hash = md5(t_df2.internal + '.ts').hexdigest()
expected = {
REPL_POLICY: {suffix: df2_tombstone_hash},
EC_POLICY: {suffix: {
# fi is None here because we have a tombstone
None: df2_tombstone_hash}},
}[policy.policy_type]
self.assertEqual(hashes, expected)
def test_hash_suffix_one_datafile(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(
'sda1', '0', 'a', 'c', 'o', policy=policy, frag_index=7)
suffix = os.path.basename(os.path.dirname(df._datadir))
# write a datafile
timestamp = self.ts()
with df.create() as writer:
test_data = b'test file'
writer.write(test_data)
metadata = {
'X-Timestamp': timestamp.internal,
'ETag': md5(test_data).hexdigest(),
'Content-Length': len(test_data),
}
writer.put(metadata)
# note - no commit so data is non-durable
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
datafile_hash = md5({
EC_POLICY: timestamp.internal,
REPL_POLICY: timestamp.internal + '.data',
}[policy.policy_type]).hexdigest()
expected = {
REPL_POLICY: {suffix: datafile_hash},
EC_POLICY: {suffix: {
# because there's no durable state, we have no hash for
# the None key - only the frag index for the data file
7: datafile_hash}},
}[policy.policy_type]
msg = 'expected %r != %r for policy %r' % (
expected, hashes, policy)
self.assertEqual(hashes, expected, msg)
def test_hash_suffix_multi_file_ends_in_tombstone(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o', policy=policy,
frag_index=4)
suffix = os.path.basename(os.path.dirname(df._datadir))
mkdirs(df._datadir)
now = time()
            # go behind the scenes and set up a bunch of weird file names
for tdiff in [500, 100, 10, 1]:
for suff in ['.meta', '.data', '.ts']:
timestamp = Timestamp(now - tdiff)
filename = timestamp.internal
if policy.policy_type == EC_POLICY and suff == '.data':
filename += '#%s' % df._frag_index
filename += suff
open(os.path.join(df._datadir, filename), 'w').close()
tombstone_hash = md5(filename).hexdigest()
# call get_hashes and it should clean things up
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
expected = {
REPL_POLICY: {suffix: tombstone_hash},
EC_POLICY: {suffix: {
# fi is None here because we have a tombstone
None: tombstone_hash}},
}[policy.policy_type]
self.assertEqual(hashes, expected)
# only the tombstone should be left
found_files = os.listdir(df._datadir)
self.assertEqual(found_files, [filename])
def _do_hash_suffix_multi_file_ends_in_datafile(self, policy,
legacy_durable):
# if legacy_durable is True then synthesize legacy durable files
# instead of having a durable marker in the data file name
frag_index = 4
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o', policy=policy,
frag_index=frag_index)
suffix = os.path.basename(os.path.dirname(df._datadir))
mkdirs(df._datadir)
now = time()
timestamp = None
        # go behind the scenes and set up a bunch of weird file names
for tdiff in [500, 100, 10, 1]:
suffs = ['.meta', '.data']
if tdiff > 50:
suffs.append('.ts')
if policy.policy_type == EC_POLICY and legacy_durable:
suffs.append('.durable')
for suff in suffs:
timestamp = Timestamp(now - tdiff)
if suff == '.data':
filename = _make_datafilename(
timestamp, policy, frag_index,
durable=not legacy_durable)
else:
filename = timestamp.internal + suff
open(os.path.join(df._datadir, filename), 'w').close()
meta_timestamp = Timestamp(now)
metadata_filename = meta_timestamp.internal + '.meta'
open(os.path.join(df._datadir, metadata_filename), 'w').close()
# call get_hashes and it should clean up all but the most recent files
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
# calculate expected outcome
data_filename = _make_datafilename(
timestamp, policy, frag_index, durable=not legacy_durable)
expected_files = [data_filename, metadata_filename]
if policy.policy_type == EC_POLICY:
# note: expected hashes is same with or without legacy durable file
hasher = md5()
hasher.update(metadata_filename)
hasher.update(timestamp.internal + '.durable')
expected = {
suffix: {
# metadata & durable updates are hashed separately
None: hasher.hexdigest(),
4: self.fname_to_ts_hash(data_filename),
}
}
if legacy_durable:
expected_files.append(timestamp.internal + '.durable')
elif policy.policy_type == REPL_POLICY:
hasher = md5()
hasher.update(metadata_filename)
hasher.update(data_filename)
expected = {suffix: hasher.hexdigest()}
else:
self.fail('unknown policy type %r' % policy.policy_type)
self.assertEqual(hashes, expected)
# only the meta and data should be left
self.assertEqual(sorted(os.listdir(df._datadir)),
sorted(expected_files))
def test_hash_suffix_multifile_ends_in_datafile(self):
for policy in self.iter_policies():
self._do_hash_suffix_multi_file_ends_in_datafile(
policy, legacy_durable=False)
def test_hash_suffix_multifile_ends_in_datafile_legacy_durable(self):
for policy in self.iter_policies():
if policy.policy_type == EC_POLICY:
self._do_hash_suffix_multi_file_ends_in_datafile(
policy, legacy_durable=True)
def _verify_get_hashes(self, filenames, ts_data, ts_meta, ts_ctype,
policy):
"""
Helper method to create a set of ondisk files and verify suffix_hashes.
:param filenames: list of filenames to create in an object hash dir
:param ts_data: newest data timestamp, used for expected result
:param ts_meta: newest meta timestamp, used for expected result
:param ts_ctype: newest content-type timestamp, used for expected
result
:param policy: storage policy to use for test
"""
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy, frag_index=4)
suffix = os.path.basename(os.path.dirname(df._datadir))
partition_dir = os.path.dirname(os.path.dirname(df._datadir))
rmtree(partition_dir, ignore_errors=True) # clean dir for each test
mkdirs(df._datadir)
# calculate expected result
hasher = md5()
if policy.policy_type == EC_POLICY:
hasher.update(ts_meta.internal + '.meta')
hasher.update(ts_data.internal + '.durable')
if ts_ctype:
hasher.update(ts_ctype.internal + '_ctype')
expected = {
suffix: {
None: hasher.hexdigest(),
4: md5(ts_data.internal).hexdigest(),
}
}
elif policy.policy_type == REPL_POLICY:
hasher.update(ts_meta.internal + '.meta')
hasher.update(ts_data.internal + '.data')
if ts_ctype:
hasher.update(ts_ctype.internal + '_ctype')
expected = {suffix: hasher.hexdigest()}
else:
self.fail('unknown policy type %r' % policy.policy_type)
for fname in filenames:
open(os.path.join(df._datadir, fname), 'w').close()
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
msg = 'expected %r != %r for policy %r' % (
expected, hashes, policy)
self.assertEqual(hashes, expected, msg)
def test_hash_suffix_with_older_content_type_in_meta(self):
# single meta file having older content-type
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_data, ts_ctype, ts_meta = (
self.ts(), self.ts(), self.ts())
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_meta, ts_ctype)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_meta, ts_ctype, policy)
do_test(False)
do_test(True)
def test_hash_suffix_with_same_age_content_type_in_meta(self):
# single meta file having same age content-type
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_data, ts_meta = (self.ts(), self.ts())
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_meta, ts_meta)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_meta, ts_meta, policy)
do_test(False)
do_test(True)
def test_hash_suffix_with_obsolete_content_type_in_meta(self):
# After rsync replication we could have a single meta file having
# content-type older than a replicated data file
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_ctype, ts_data, ts_meta = (self.ts(), self.ts(), self.ts())
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_meta, ts_ctype)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_meta, None, policy)
do_test(False)
do_test(True)
def test_hash_suffix_with_older_content_type_in_newer_meta(self):
# After rsync replication we could have two meta files: newest
# content-type is in newer meta file, older than newer meta file
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_data, ts_older_meta, ts_ctype, ts_newer_meta = (
self.ts() for _ in range(4))
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_older_meta),
_make_metafilename(ts_newer_meta, ts_ctype)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_newer_meta, ts_ctype, policy)
do_test(False)
do_test(True)
def test_hash_suffix_with_same_age_content_type_in_newer_meta(self):
# After rsync replication we could have two meta files: newest
# content-type is in newer meta file, at same age as newer meta file
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_data, ts_older_meta, ts_newer_meta = (
self.ts() for _ in range(3))
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_newer_meta, ts_newer_meta)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_newer_meta, ts_newer_meta, policy)
do_test(False)
do_test(True)
def test_hash_suffix_with_older_content_type_in_older_meta(self):
# After rsync replication we could have two meta files: newest
# content-type is in older meta file, older than older meta file
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_data, ts_ctype, ts_older_meta, ts_newer_meta = (
self.ts() for _ in range(4))
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_newer_meta),
_make_metafilename(ts_older_meta, ts_ctype)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_newer_meta, ts_ctype, policy)
do_test(False)
do_test(True)
def test_hash_suffix_with_same_age_content_type_in_older_meta(self):
# After rsync replication we could have two meta files: newest
# content-type is in older meta file, at same age as older meta file
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_data, ts_older_meta, ts_newer_meta = (
self.ts() for _ in range(3))
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_newer_meta),
_make_metafilename(ts_older_meta, ts_older_meta)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_newer_meta, ts_older_meta, policy)
do_test(False)
do_test(True)
def test_hash_suffix_with_obsolete_content_type_in_older_meta(self):
# After rsync replication we could have two meta files: newest
# content-type is in older meta file, but older than data file
def do_test(legacy_durable):
for policy in self.iter_policies():
ts_ctype, ts_data, ts_older_meta, ts_newer_meta = (
self.ts() for _ in range(4))
filenames = [_make_datafilename(ts_data, policy, frag_index=4,
durable=not legacy_durable),
_make_metafilename(ts_newer_meta),
_make_metafilename(ts_older_meta, ts_ctype)]
if policy.policy_type == EC_POLICY and legacy_durable:
filenames.append(ts_data.internal + '.durable')
self._verify_get_hashes(
filenames, ts_data, ts_newer_meta, None, policy)
do_test(False)
do_test(True)
def test_hash_suffix_removes_empty_hashdir_and_suffix(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy, frag_index=2)
os.makedirs(df._datadir)
self.assertTrue(os.path.exists(df._datadir)) # sanity
df_mgr.get_hashes('sda1', '0', [], policy)
suffix_dir = os.path.dirname(df._datadir)
self.assertFalse(os.path.exists(suffix_dir))
def test_hash_suffix_removes_empty_hashdirs_in_valid_suffix(self):
paths, suffix = find_paths_with_matching_suffixes(needed_matches=3,
needed_suffixes=0)
matching_paths = paths.pop(suffix)
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile('sda1', '0', *matching_paths[0],
policy=policy, frag_index=2)
# create a real, valid hsh_path
df.delete(Timestamp.now())
# and a couple of empty hsh_paths
empty_hsh_paths = []
for path in matching_paths[1:]:
fake_df = df_mgr.get_diskfile('sda1', '0', *path,
policy=policy)
os.makedirs(fake_df._datadir)
empty_hsh_paths.append(fake_df._datadir)
for hsh_path in empty_hsh_paths:
self.assertTrue(os.path.exists(hsh_path)) # sanity
# get_hashes will cleanup empty hsh_path and leave valid one
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertIn(suffix, hashes)
self.assertTrue(os.path.exists(df._datadir))
for hsh_path in empty_hsh_paths:
self.assertFalse(os.path.exists(hsh_path))
# get_hashes tests - hash_suffix error handling
def test_hash_suffix_listdir_enotdir(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
suffix = '123'
suffix_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0',
suffix)
os.makedirs(suffix_path)
self.assertTrue(os.path.exists(suffix_path)) # sanity
hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy)
# suffix dir cleaned up by get_hashes
self.assertFalse(os.path.exists(suffix_path))
expected = {}
msg = 'expected %r != %r for policy %r' % (
expected, hashes, policy)
self.assertEqual(hashes, expected, msg)
# now make the suffix path a file
open(suffix_path, 'w').close()
hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy)
expected = {}
msg = 'expected %r != %r for policy %r' % (
expected, hashes, policy)
self.assertEqual(hashes, expected, msg)
def test_hash_suffix_listdir_enoent(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(policy), '0')
mkdirs(part_path) # ensure we'll bother writing a pkl at all
orig_listdir = os.listdir
listdir_calls = []
def mock_listdir(path):
success = False
try:
rv = orig_listdir(path)
success = True
return rv
finally:
listdir_calls.append((path, success))
with mock.patch('swift.obj.diskfile.os.listdir',
mock_listdir):
# recalc always forces hash_suffix even if the suffix
# does not exist!
df_mgr.get_hashes('sda1', '0', ['123'], policy)
self.assertEqual(listdir_calls, [
# part path gets created automatically
(part_path, True),
# this one blows up
(os.path.join(part_path, '123'), False),
])
def test_hash_suffix_cleanup_ondisk_files_enotdir_quarantined(self):
for policy in self.iter_policies():
df = self.df_router[policy].get_diskfile(
self.existing_device, '0', 'a', 'c', 'o', policy=policy)
# make the suffix directory
suffix_path = os.path.dirname(df._datadir)
os.makedirs(suffix_path)
suffix = os.path.basename(suffix_path)
# make the df hash path a file
open(df._datadir, 'wb').close()
df_mgr = self.df_router[policy]
hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix],
policy)
self.assertEqual(hashes, {})
# and hash path is quarantined
self.assertFalse(os.path.exists(df._datadir))
# each device has a quarantined directory
quarantine_base = os.path.join(self.devices,
self.existing_device, 'quarantined')
# the quarantine path is...
quarantine_path = os.path.join(
quarantine_base, # quarantine root
diskfile.get_data_dir(policy), # per-policy data dir
os.path.basename(df._datadir) # name of quarantined file
)
self.assertTrue(os.path.exists(quarantine_path))
def test_auditor_hashdir_not_listable(self):
def list_locations(dirname, datadir):
return [(loc.path, loc.device, loc.partition, loc.policy)
for loc in diskfile.object_audit_location_generator(
devices=dirname, datadir=datadir, mount_check=False)]
real_listdir = os.listdir
def splode_if_endswith(suffix, err):
def sploder(path):
if path.endswith(suffix):
raise OSError(err, os.strerror(err))
else:
return real_listdir(path)
return sploder
with temptree([]) as tmpdir:
hashdir1 = os.path.join(tmpdir, "sdf", "objects", "2607", "b54",
"fe450ec990a88cc4b252b181bab04b54")
os.makedirs(hashdir1)
with open(os.path.join(hashdir1, '1656032666.98003.ts'), 'w'):
pass
hashdir2 = os.path.join(tmpdir, "sdf", "objects", "2809", "afd",
"7089ab48d955ab0851fc51cc17a34afd")
os.makedirs(hashdir2)
with open(os.path.join(hashdir2, '1656080624.31899.ts'), 'w'):
pass
expected = [(hashdir2, 'sdf', '2809', POLICIES[0])]
# Parts that look like files are just skipped
with mock.patch('os.listdir', splode_if_endswith(
"2607", errno.ENOTDIR)):
self.assertEqual(expected, list_locations(tmpdir, 'objects'))
diskfile.clear_auditor_status(tmpdir, 'objects')
# ENODATA on a suffix is ok
with mock.patch('os.listdir', splode_if_endswith(
"b54", errno.ENODATA)):
self.assertEqual(expected, list_locations(tmpdir, 'objects'))
diskfile.clear_auditor_status(tmpdir, 'objects')
# sanity the other way
expected = [(hashdir1, 'sdf', '2607', POLICIES[0])]
with mock.patch('os.listdir', splode_if_endswith(
"2809", errno.ENODATA)):
self.assertEqual(expected, list_locations(tmpdir, 'objects'))
diskfile.clear_auditor_status(tmpdir, 'objects')
with mock.patch('os.listdir', splode_if_endswith(
"afd", errno.ENOTDIR)):
self.assertEqual(expected, list_locations(tmpdir, 'objects'))
diskfile.clear_auditor_status(tmpdir, 'objects')
def test_hash_suffix_cleanup_ondisk_files_enodata_quarantined(self):
for policy in self.iter_policies():
df = self.df_router[policy].get_diskfile(
self.existing_device, '0', 'a', 'c', 'o', policy=policy)
# make everything down to the hash directory
os.makedirs(df._datadir)
suffix = os.path.basename(os.path.dirname(df._datadir))
orig_listdir = os.listdir
def fake_listdir(path):
if path == df._datadir:
raise OSError(errno.ENODATA, 'nope')
return orig_listdir(path)
df_mgr = self.df_router[policy]
with mock.patch('os.listdir', side_effect=fake_listdir):
hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix],
policy)
self.assertEqual(hashes, {})
# and hash path is quarantined
self.assertFalse(os.path.exists(df._datadir))
# each device has a quarantined directory
quarantine_base = os.path.join(self.devices,
self.existing_device, 'quarantined')
# the quarantine path is...
quarantine_path = os.path.join(
quarantine_base, # quarantine root
diskfile.get_data_dir(policy), # per-policy data dir
os.path.basename(df._datadir) # name of quarantined file
)
self.assertTrue(os.path.exists(quarantine_path))
def test_hash_suffix_cleanup_ondisk_files_other_oserror(self):
for policy in self.iter_policies():
timestamp = self.ts()
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy,
frag_index=7)
suffix = os.path.basename(os.path.dirname(df._datadir))
with df.create() as writer:
test_data = b'test_data'
writer.write(test_data)
metadata = {
'X-Timestamp': timestamp.internal,
'ETag': md5(test_data).hexdigest(),
'Content-Length': len(test_data),
}
writer.put(metadata)
orig_os_listdir = os.listdir
listdir_calls = []
part_path = os.path.join(self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0')
suffix_path = os.path.join(part_path, suffix)
datadir_path = os.path.join(suffix_path, hash_path('a', 'c', 'o'))
def mock_os_listdir(path):
listdir_calls.append(path)
if path == datadir_path:
# we want the part and suffix listdir calls to pass and
# make the cleanup_ondisk_files raise an exception
raise OSError(errno.EACCES, os.strerror(errno.EACCES))
return orig_os_listdir(path)
with mock.patch('os.listdir', mock_os_listdir):
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertEqual(listdir_calls, [
part_path,
suffix_path,
datadir_path,
])
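# the EACCES does not escape get_hashes; the suffix is simply left
# un-rehashed (None) in the result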
expected = {suffix: None}
msg = 'expected %r != %r for policy %r' % (
expected, hashes, policy)
self.assertEqual(hashes, expected, msg)
def test_hash_suffix_rmdir_hsh_path_oserror(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# make an empty hsh_path to be removed
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy)
os.makedirs(df._datadir)
suffix = os.path.basename(os.path.dirname(df._datadir))
with mock.patch('os.rmdir', side_effect=OSError()):
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
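# an empty-but-unremovable hash dir hashes to nothing: an empty
# frag-index dict for EC, the md5 of zero bytes for replicated policies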
expected = {
EC_POLICY: {},
REPL_POLICY: md5().hexdigest(),
}[policy.policy_type]
self.assertEqual(hashes, {suffix: expected})
self.assertTrue(os.path.exists(df._datadir))
def test_hash_suffix_rmdir_suffix_oserror(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# make an empty hsh_path to be removed
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy)
os.makedirs(df._datadir)
suffix_path = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_path)
captured_paths = []
def mock_rmdir(path):
captured_paths.append(path)
if path == suffix_path:
raise OSError('kaboom!')
with mock.patch('os.rmdir', mock_rmdir):
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
expected = {
EC_POLICY: {},
REPL_POLICY: md5().hexdigest(),
}[policy.policy_type]
self.assertEqual(hashes, {suffix: expected})
self.assertTrue(os.path.exists(suffix_path))
self.assertEqual([
df._datadir,
suffix_path,
], captured_paths)
# get_hashes tests - behaviors
def test_get_hashes_does_not_create_partition(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertEqual(hashes, {})
part_path = os.path.join(
self.devices, 'sda1', diskfile.get_data_dir(policy), '0')
self.assertFalse(os.path.exists(part_path))
def test_get_hashes_creates_pkl(self):
# like above, but -- if the partition already exists, make the pickle
for policy in self.iter_policies():
part_path = os.path.join(
self.devices, 'sda1', diskfile.get_data_dir(policy), '0')
mkdirs(part_path)
df_mgr = self.df_router[policy]
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertEqual(hashes, {})
self.assertTrue(os.path.exists(part_path))
hashes_file = os.path.join(part_path,
diskfile.HASH_FILE)
self.assertTrue(os.path.exists(hashes_file))
# and double check the hashes
new_hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertEqual(hashes, new_hashes)
def _do_test_get_hashes_new_pkl_finds_new_suffix_dirs(self, device):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(
self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path,
diskfile.HASH_FILE)
# add something to find
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy, frag_index=4)
timestamp = self.ts()
df.delete(timestamp)
suffix_dir = os.path.dirname(df._datadir)
suffix = os.path.basename(suffix_dir)
# get_hashes will find the untracked suffix dir
self.assertFalse(os.path.exists(hashes_file)) # sanity
hashes = df_mgr.get_hashes(device, '0', [], policy)
self.assertIn(suffix, hashes)
# ... and create a hashes pickle for it
self.assertTrue(os.path.exists(hashes_file))
# repeat and check there is no rehashing
with mock.patch.object(df_mgr, '_hash_suffix',
return_value=hashes[suffix]) as mocked:
repeat_hashes = df_mgr.get_hashes(device, '0', [], policy)
self.assertEqual(hashes, repeat_hashes)
mocked.assert_not_called()
def test_get_hashes_new_pkl_finds_new_suffix_dirs_unicode(self):
self._do_test_get_hashes_new_pkl_finds_new_suffix_dirs(u'sda1')
def test_get_hashes_new_pkl_finds_new_suffix_dirs(self):
self._do_test_get_hashes_new_pkl_finds_new_suffix_dirs('sda1')
def test_get_hashes_new_pkl_missing_invalid_finds_new_suffix_dirs(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(
self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path,
diskfile.HASH_FILE)
invalidations_file = os.path.join(
part_path, diskfile.HASH_INVALIDATIONS_FILE)
# add something to find
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy, frag_index=4)
timestamp = self.ts()
df.delete(timestamp)
suffix = os.path.basename(os.path.dirname(df._datadir))
with open(invalidations_file) as f:
self.assertEqual('%s\n' % suffix, f.read())
# even if invalidations_file is missing ...
os.unlink(invalidations_file)
hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy)
# get_hashes will *still* find the untracked suffix dir
self.assertIn(suffix, hashes)
# ... and create a hashes pickle for it
self.assertTrue(os.path.exists(hashes_file))
def test_get_hashes_new_pkl_lying_invalid_finds_new_suffix_dirs(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(
self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0')
hashes_file = os.path.join(part_path,
diskfile.HASH_FILE)
invalidations_file = os.path.join(
part_path, diskfile.HASH_INVALIDATIONS_FILE)
# add something to find
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy, frag_index=4)
timestamp = self.ts()
df.delete(timestamp)
suffix = os.path.basename(os.path.dirname(df._datadir))
with open(invalidations_file) as f:
self.assertEqual('%s\n' % suffix, f.read())
# even if invalidations_file is lying ...
with open(invalidations_file, 'w') as f:
f.write('%x\n' % (int(suffix, 16) + 1))
hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy)
# get_hashes will *still* find the untracked suffix dir
self.assertIn(suffix, hashes)
# ... and create a hashes pickle for it
self.assertTrue(os.path.exists(hashes_file))
def test_get_hashes_old_pickle_does_not_find_new_suffix_dirs(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# create an empty stale pickle
part_path = os.path.join(
self.devices, 'sda1', diskfile.get_data_dir(policy), '0')
mkdirs(part_path)
hashes_file = os.path.join(part_path,
diskfile.HASH_FILE)
hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy)
self.assertEqual(hashes, {})
self.assertTrue(os.path.exists(hashes_file)) # sanity
# add something to find
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c', 'o',
policy=policy, frag_index=4)
os.makedirs(df._datadir)
filename = Timestamp.now().internal + '.ts'
open(os.path.join(df._datadir, filename), 'w').close()
suffix = os.path.basename(os.path.dirname(df._datadir))
# but get_hashes has no reason to find it (because we didn't
# call invalidate_hash)
new_hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertEqual(new_hashes, hashes)
# ... unless remote end asks for a recalc
hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix],
policy)
self.assertIn(suffix, hashes)
def test_get_hashes_does_not_rehash_known_suffix_dirs(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy, frag_index=4)
suffix = os.path.basename(os.path.dirname(df._datadir))
timestamp = self.ts()
df.delete(timestamp)
# create the baseline hashes file
hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy)
self.assertIn(suffix, hashes)
# now change the contents of the suffix w/o calling
# invalidate_hash
rmtree(df._datadir)
suffix_path = os.path.dirname(df._datadir)
self.assertTrue(os.path.exists(suffix_path)) # sanity
new_hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
# ... and get_hashes is none the wiser
self.assertEqual(new_hashes, hashes)
# ... unless remote end asks for a recalc
hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix],
policy)
self.assertNotEqual(new_hashes, hashes)
# and the empty suffix path is removed
self.assertFalse(os.path.exists(suffix_path))
# ... and the suffix key is removed
expected = {}
self.assertEqual(expected, hashes)
def test_get_hashes_multi_file_multi_suffix(self):
paths, suffix = find_paths_with_matching_suffixes(needed_matches=2,
needed_suffixes=3)
matching_paths = paths.pop(suffix)
matching_paths.sort(key=lambda path: hash_path(*path))
other_paths = []
for suffix, paths in paths.items():
other_paths.append(paths[0])
if len(other_paths) >= 2:
break
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# first we'll make a tombstone
df = df_mgr.get_diskfile(self.existing_device, '0',
*other_paths[0], policy=policy,
frag_index=4)
timestamp = self.ts()
df.delete(timestamp)
tombstone_hash = md5(timestamp.internal + '.ts').hexdigest()
tombstone_suffix = os.path.basename(os.path.dirname(df._datadir))
# second file in another suffix has a .datafile
df = df_mgr.get_diskfile(self.existing_device, '0',
*other_paths[1], policy=policy,
frag_index=5)
timestamp = self.ts()
with df.create() as writer:
test_data = b'test_file'
writer.write(test_data)
metadata = {
'X-Timestamp': timestamp.internal,
'ETag': md5(test_data).hexdigest(),
'Content-Length': len(test_data),
}
writer.put(metadata)
writer.commit(timestamp)
datafile_name = _make_datafilename(
timestamp, policy, frag_index=5)
durable_hash = md5(timestamp.internal + '.durable').hexdigest()
datafile_suffix = os.path.basename(os.path.dirname(df._datadir))
# in the *third* suffix - two datafiles for different hashes
df = df_mgr.get_diskfile(self.existing_device, '0',
*matching_paths[0], policy=policy,
frag_index=6)
matching_suffix = os.path.basename(os.path.dirname(df._datadir))
timestamp = self.ts()
with df.create() as writer:
test_data = b'test_file'
writer.write(test_data)
metadata = {
'X-Timestamp': timestamp.internal,
'ETag': md5(test_data).hexdigest(),
'Content-Length': len(test_data),
}
writer.put(metadata)
writer.commit(timestamp)
# we'll keep track of file names for hash calculations
filename = _make_datafilename(
timestamp, policy, frag_index=6)
data_filenames = {
6: filename
}
df = df_mgr.get_diskfile(self.existing_device, '0',
*matching_paths[1], policy=policy,
frag_index=7)
self.assertEqual(os.path.basename(os.path.dirname(df._datadir)),
matching_suffix) # sanity
timestamp = self.ts()
with df.create() as writer:
test_data = b'test_file'
writer.write(test_data)
metadata = {
'X-Timestamp': timestamp.internal,
'ETag': md5(test_data).hexdigest(),
'Content-Length': len(test_data),
}
writer.put(metadata)
writer.commit(timestamp)
filename = _make_datafilename(
timestamp, policy, frag_index=7)
data_filenames[7] = filename
# now make up the expected suffixes!
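# for EC policies each suffix maps to a dict: the None key hashes the
# frag-index-agnostic events (tombstones and durable markers) and each
# frag index keys a hash derived from its own data file; replicated
# policies just hash all the file names together per suffix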
if policy.policy_type == EC_POLICY:
hasher = md5()
for filename in data_filenames.values():
# each data file updates the hasher with durable timestamp
hasher.update(filename.split('#', 1)[0] + '.durable')
expected = {
tombstone_suffix: {
None: tombstone_hash,
},
datafile_suffix: {
None: durable_hash,
5: self.fname_to_ts_hash(datafile_name),
},
matching_suffix: {
None: hasher.hexdigest(),
6: self.fname_to_ts_hash(data_filenames[6]),
7: self.fname_to_ts_hash(data_filenames[7]),
},
}
elif policy.policy_type == REPL_POLICY:
hasher = md5()
for filename in data_filenames.values():
hasher.update(filename)
expected = {
tombstone_suffix: tombstone_hash,
datafile_suffix: md5(datafile_name).hexdigest(),
matching_suffix: hasher.hexdigest(),
}
else:
self.fail('unknown policy type %r' % policy.policy_type)
hashes = df_mgr.get_hashes('sda1', '0', [], policy)
self.assertEqual(hashes, expected)
# get_hashes tests - error handling
def test_get_hashes_bad_dev(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df_mgr.mount_check = True
with mock_check_drive(ismount=False):
self.assertRaises(
DiskFileDeviceUnavailable,
df_mgr.get_hashes, self.existing_device, '0', ['123'],
policy)
def test_get_hashes_zero_bytes_pickle(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0')
os.makedirs(part_path)
# create a pre-existing zero-byte file
open(os.path.join(part_path, diskfile.HASH_FILE), 'w').close()
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertEqual(hashes, {})
def _test_get_hashes_race(self, hash_breaking_function):
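# While the outer get_hashes call is hashing its first suffix, a second
# suffix is created and hashed by a nested get_hashes call (driven from
# mock_hash_suffix below); whatever hash_breaking_function did to
# hashes.pkl beforehand, the outer call must still report both suffixes.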
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy, frag_index=3)
suffix = os.path.basename(os.path.dirname(df._datadir))
df2 = self.get_different_suffix_df(df, frag_index=5)
suffix2 = os.path.basename(os.path.dirname(df2._datadir))
part_path = os.path.dirname(os.path.dirname(
os.path.join(df._datadir)))
mkdirs(part_path)
hashfile_path = os.path.join(part_path, diskfile.HASH_FILE)
# create hashes.pkl
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertEqual(hashes, {}) # sanity
self.assertTrue(os.path.exists(hashfile_path))
# and optionally tamper with the hashes.pkl...
hash_breaking_function(hashfile_path)
non_local = {'called': False}
orig_hash_suffix = df_mgr._hash_suffix
# then create a suffix
df.delete(self.ts())
def mock_hash_suffix(*args, **kwargs):
# on the first call only, simulate a concurrent update to another suffix
if not non_local['called']:
non_local['called'] = True
df2.delete(self.ts())
non_local['other_hashes'] = df_mgr.get_hashes(
self.existing_device, '0', [], policy)
return orig_hash_suffix(*args, **kwargs)
with mock.patch.object(df_mgr, '_hash_suffix', mock_hash_suffix):
hashes = df_mgr.get_hashes(self.existing_device, '0', [],
policy)
self.assertTrue(non_local['called'])
self.assertIn(suffix, hashes)
self.assertIn(suffix2, hashes)
def test_get_hashes_race_invalid_pickle(self):
def hash_breaking_function(hashfile_path):
# create a garbage invalid zero-byte file which cannot be unpickled
open(hashfile_path, 'w').close()
self._test_get_hashes_race(hash_breaking_function)
def test_get_hashes_race_new_partition(self):
def hash_breaking_function(hashfile_path):
# simulate rebalanced part doing post-rsync REPLICATE
os.unlink(hashfile_path)
part_dir = os.path.dirname(hashfile_path)
os.unlink(os.path.join(part_dir, '.lock'))
# sanity
self.assertEqual([], os.listdir(os.path.dirname(hashfile_path)))
self._test_get_hashes_race(hash_breaking_function)
def test_get_hashes_race_existing_partition(self):
def hash_breaking_function(hashfile_path):
# no-op - simulate ok existing partition
self.assertTrue(os.path.exists(hashfile_path))
self._test_get_hashes_race(hash_breaking_function)
def test_get_hashes_hash_suffix_enotdir(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
# create a real suffix dir
df = df_mgr.get_diskfile(self.existing_device, '0', 'a', 'c',
'o', policy=policy, frag_index=3)
df.delete(Timestamp.now())
suffix = os.path.basename(os.path.dirname(df._datadir))
# touch a bad suffix dir
part_dir = os.path.join(self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0')
open(os.path.join(part_dir, 'bad'), 'w').close()
hashes = df_mgr.get_hashes(self.existing_device, '0', [], policy)
self.assertIn(suffix, hashes)
self.assertNotIn('bad', hashes)
def test_get_hashes_hash_suffix_other_oserror(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
suffix = '123'
suffix_path = os.path.join(self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0',
suffix)
os.makedirs(suffix_path)
self.assertTrue(os.path.exists(suffix_path)) # sanity
hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix],
policy)
expected = {}
msg = 'expected %r != %r for policy %r' % (expected, hashes,
policy)
self.assertEqual(hashes, expected, msg)
# this OSError does *not* raise PathNotDir, and is allowed to leak
# from hash_suffix into get_hashes
mocked_os_listdir = mock.Mock(
side_effect=OSError(errno.EACCES, os.strerror(errno.EACCES)))
with mock.patch("os.listdir", mocked_os_listdir):
with mock.patch('swift.obj.diskfile.logging') as mock_logging:
hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy)
self.assertEqual(mock_logging.method_calls,
[mock.call.exception('Error hashing suffix')])
# recalc always causes a suffix to get reset to None; the listdir
# error prevents the suffix from being rehashed
expected = {'123': None}
msg = 'expected %r != %r for policy %r' % (expected, hashes,
policy)
self.assertEqual(hashes, expected, msg)
def test_get_hashes_modified_recursive_retry(self):
for policy in self.iter_policies():
df_mgr = self.df_router[policy]
part_path = os.path.join(self.devices, self.existing_device,
diskfile.get_data_dir(policy), '0')
mkdirs(part_path)
# first create an empty pickle
df_mgr.get_hashes(self.existing_device, '0', [], policy)
self.assertTrue(os.path.exists(os.path.join(
part_path, diskfile.HASH_FILE)))
non_local = {'suffix_count': 1}
calls = []
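# read_hashes is patched so that each read/modification-check pair sees a
# pickle that has grown by one suffix, making get_hashes treat it as
# concurrently modified and retry; only the third pair (calls 5 and 6)
# matches, so the recursion stops there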
def mock_read_hashes(filename):
rv = {'%03x' % i: 'fake'
for i in range(non_local['suffix_count'])}
if len(calls) <= 3:
# this will make the *next* call get slightly
# different content
non_local['suffix_count'] += 1
# track exactly the value for every return
calls.append(dict(rv))
rv['valid'] = True
return rv
with mock.patch('swift.obj.diskfile.read_hashes',
mock_read_hashes):
df_mgr.get_hashes(self.existing_device, '0', ['123'],
policy)
self.assertEqual(calls, [
{'000': 'fake'}, # read
{'000': 'fake', '001': 'fake'}, # modification
{'000': 'fake', '001': 'fake', '002': 'fake'}, # read
{'000': 'fake', '001': 'fake', '002': 'fake',
'003': 'fake'}, # modified
{'000': 'fake', '001': 'fake', '002': 'fake',
'003': 'fake', '004': 'fake'}, # read
{'000': 'fake', '001': 'fake', '002': 'fake',
'003': 'fake', '004': 'fake'}, # not modified
])
class TestHashesHelpers(unittest.TestCase):
def setUp(self):
self.testdir = tempfile.mkdtemp()
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def test_read_legacy_hashes(self):
hashes = {'fff': 'fake'}
hashes_file = os.path.join(self.testdir, diskfile.HASH_FILE)
with open(hashes_file, 'wb') as f:
pickle.dump(hashes, f)
expected = {
'fff': 'fake',
'updated': -1,
'valid': True,
}
self.assertEqual(expected, diskfile.read_hashes(self.testdir))
def test_write_hashes_valid_updated(self):
hashes = {'888': 'fake', 'valid': True}
now = time()
with mock.patch('swift.obj.diskfile.time.time', return_value=now):
diskfile.write_hashes(self.testdir, hashes)
hashes_file = os.path.join(self.testdir, diskfile.HASH_FILE)
with open(hashes_file, 'rb') as f:
data = pickle.load(f)
expected = {
'888': 'fake',
'updated': now,
'valid': True,
}
self.assertEqual(expected, data)
def test_write_hashes_invalid_updated(self):
hashes = {'valid': False}
now = time()
with mock.patch('swift.obj.diskfile.time.time', return_value=now):
diskfile.write_hashes(self.testdir, hashes)
hashes_file = os.path.join(self.testdir, diskfile.HASH_FILE)
with open(hashes_file, 'rb') as f:
data = pickle.load(f)
expected = {
'updated': now,
'valid': False,
}
self.assertEqual(expected, data)
def test_write_hashes_safe_default(self):
hashes = {}
now = time()
with mock.patch('swift.obj.diskfile.time.time', return_value=now):
diskfile.write_hashes(self.testdir, hashes)
hashes_file = os.path.join(self.testdir, diskfile.HASH_FILE)
with open(hashes_file, 'rb') as f:
data = pickle.load(f)
expected = {
'updated': now,
'valid': False,
}
self.assertEqual(expected, data)
def test_read_write_valid_hashes_mutation_and_transative_equality(self):
hashes = {'000': 'fake', 'valid': True}
diskfile.write_hashes(self.testdir, hashes)
# write_hashes mutates the passed in hashes, it adds the updated key
self.assertIn('updated', hashes)
self.assertTrue(hashes['valid'])
result = diskfile.read_hashes(self.testdir)
# unpickling results in a new object
self.assertNotEqual(id(hashes), id(result))
# with exactly the same value mutation from write_hashes
self.assertEqual(hashes, result)
def test_read_write_invalid_hashes_mutation_and_transative_equality(self):
hashes = {'valid': False}
diskfile.write_hashes(self.testdir, hashes)
# write_hashes mutates the passed in hashes, it adds the updated key
self.assertIn('updated', hashes)
self.assertFalse(hashes['valid'])
result = diskfile.read_hashes(self.testdir)
# unpickling results in a new object
self.assertNotEqual(id(hashes), id(result))
# with exactly the same value mutation from write_hashes
self.assertEqual(hashes, result)
def test_ignore_corrupted_hashes(self):
corrupted_hashes = {u'\x00\x00\x00': False, 'valid': True}
diskfile.write_hashes(self.testdir, corrupted_hashes)
result = diskfile.read_hashes(self.testdir)
self.assertFalse(result['valid'])
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/obj/test_diskfile.py |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import time
import unittest
import eventlet
import mock
import six
from swift.common import exceptions, utils
from swift.common.storage_policy import POLICIES
from swift.common.swob import wsgi_to_bytes, wsgi_to_str
from swift.common.utils import Timestamp
from swift.obj import ssync_sender, diskfile, ssync_receiver
from swift.obj.replicator import ObjectReplicator
from test.debug_logger import debug_logger
from test.unit.obj.common import BaseTest
from test.unit import patch_policies, make_timestamp_iter, skip_if_no_xattrs
class NullBufferedHTTPConnection(object):
def __init__(*args, **kwargs):
pass
def putrequest(*args, **kwargs):
pass
def putheader(*args, **kwargs):
pass
def endheaders(*args, **kwargs):
pass
def getresponse(*args, **kwargs):
pass
def close(*args, **kwargs):
pass
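# Test doubles for the ssync HTTP plumbing: FakeResponse wraps an optional
# body as a single chunked-transfer chunk ('<hex len>\r\n<body>\r\n0\r\n\r\n')
# that the sender reads back line by line, and FakeConnection simply records
# whatever the sender writes so tests can assert on the raw wire data.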
class FakeResponse(ssync_sender.SsyncBufferedHTTPResponse):
def __init__(self, chunk_body='', headers=None):
self.status = 200
self.close_called = False
if not six.PY2:
chunk_body = chunk_body.encode('ascii')
if chunk_body:
self.fp = io.BytesIO(
b'%x\r\n%s\r\n0\r\n\r\n' % (len(chunk_body), chunk_body))
self.ssync_response_buffer = b''
self.ssync_response_chunk_left = 0
self.headers = headers or {}
def read(self, *args, **kwargs):
return b''
def close(self):
self.close_called = True
def getheader(self, header_name, default=None):
return str(self.headers.get(header_name, default))
def getheaders(self):
return self.headers.items()
class FakeConnection(object):
def __init__(self):
self.sent = []
self.closed = False
def send(self, data):
self.sent.append(data)
def close(self):
self.closed = True
@patch_policies()
class TestSender(BaseTest):
def setUp(self):
skip_if_no_xattrs()
super(TestSender, self).setUp()
self.daemon_logger = debug_logger('test-ssync-sender')
self.daemon = ObjectReplicator(self.daemon_conf,
self.daemon_logger)
self.job = {'policy': POLICIES.legacy,
'device': 'test-dev',
'partition': '99'} # sufficient for Sender.__init__
self.sender = ssync_sender.Sender(self.daemon, None, self.job, None)
def test_call_catches_MessageTimeout(self):
def connect(self):
exc = exceptions.MessageTimeout(1, 'test connect')
# Cancels Eventlet's raising of this since we're about to do it.
exc.cancel()
raise exc
with mock.patch.object(ssync_sender.Sender, 'connect', connect):
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1')
job = dict(partition='9', policy=POLICIES.legacy)
self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
success, candidates = self.sender()
self.assertFalse(success)
self.assertEqual(candidates, {})
error_lines = self.daemon_logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines))
self.assertEqual('1.2.3.4:5678/sda1/9 1 second: test connect',
error_lines[0])
def test_call_catches_ReplicationException(self):
def connect(self):
raise exceptions.ReplicationException('test connect')
with mock.patch.object(ssync_sender.Sender, 'connect', connect):
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1')
job = dict(partition='9', policy=POLICIES.legacy)
self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
success, candidates = self.sender()
self.assertFalse(success)
self.assertEqual(candidates, {})
error_lines = self.daemon_logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines))
self.assertEqual('1.2.3.4:5678/sda1/9 test connect',
error_lines[0])
def test_call_catches_other_exceptions(self):
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1')
job = dict(partition='9', policy=POLICIES.legacy)
self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
self.sender.connect = 'cause exception'
success, candidates = self.sender()
self.assertFalse(success)
self.assertEqual(candidates, {})
error_lines = self.daemon_logger.get_lines_for_level('error')
for line in error_lines:
self.assertTrue(line.startswith(
'1.2.3.4:5678/sda1/9 EXCEPTION in ssync.Sender: '))
def test_call_catches_exception_handling_exception(self):
self.sender.node = None # Will cause the exception handler itself to fail
self.sender.suffixes = ['abc']
self.sender.connect = 'cause exception'
success, candidates = self.sender()
self.assertFalse(success)
self.assertEqual(candidates, {})
error_lines = self.daemon_logger.get_lines_for_level('error')
for line in error_lines:
self.assertTrue(line.startswith(
'EXCEPTION in ssync.Sender'))
def test_call_calls_others(self):
connection = FakeConnection()
response = FakeResponse()
self.sender.suffixes = ['abc']
self.sender.connect = mock.MagicMock(return_value=(connection,
response))
self.sender.missing_check = mock.MagicMock(return_value=({}, {}))
self.sender.updates = mock.MagicMock()
self.sender.disconnect = mock.MagicMock()
success, candidates = self.sender()
self.assertTrue(success)
self.assertEqual(candidates, {})
self.sender.connect.assert_called_once_with()
self.sender.missing_check.assert_called_once_with(connection, response)
self.sender.updates.assert_called_once_with(connection, response, {})
self.sender.disconnect.assert_called_once_with(connection)
def test_connect(self):
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1', backend_index=0)
job = dict(partition='9', policy=POLICIES[1])
self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
with mock.patch(
'swift.obj.ssync_sender.SsyncBufferedHTTPConnection'
) as mock_conn_class:
mock_conn = mock_conn_class.return_value
mock_resp = mock.MagicMock()
mock_resp.status = 200
mock_conn.getresponse.return_value = mock_resp
self.sender.connect()
mock_conn_class.assert_called_once_with('1.2.3.4:5678')
expectations = {
'putrequest': [
mock.call('SSYNC', '/sda1/9'),
],
'putheader': [
mock.call('Transfer-Encoding', 'chunked'),
mock.call('X-Backend-Storage-Policy-Index', 1),
mock.call('X-Backend-Ssync-Frag-Index', 0),
mock.call('X-Backend-Ssync-Node-Index', 0),
],
'endheaders': [mock.call()],
}
for method_name, expected_calls in expectations.items():
mock_method = getattr(mock_conn, method_name)
self.assertEqual(expected_calls, mock_method.mock_calls,
'connection method "%s" got %r not %r' % (
method_name, mock_method.mock_calls,
expected_calls))
def test_connect_handoff(self):
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1')
job = dict(partition='9', policy=POLICIES[1], frag_index=9)
self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
with mock.patch(
'swift.obj.ssync_sender.SsyncBufferedHTTPConnection'
) as mock_conn_class:
mock_conn = mock_conn_class.return_value
mock_resp = mock.MagicMock()
mock_resp.status = 200
mock_conn.getresponse.return_value = mock_resp
self.sender.connect()
mock_conn_class.assert_called_once_with('1.2.3.4:5678')
expectations = {
'putrequest': [
mock.call('SSYNC', '/sda1/9'),
],
'putheader': [
mock.call('Transfer-Encoding', 'chunked'),
mock.call('X-Backend-Storage-Policy-Index', 1),
],
'endheaders': [mock.call()],
}
for method_name, expected_calls in expectations.items():
mock_method = getattr(mock_conn, method_name)
self.assertEqual(expected_calls, mock_method.mock_calls,
'connection method "%s" got %r not %r' % (
method_name, mock_method.mock_calls,
expected_calls))
def test_connect_handoff_no_frag(self):
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1')
job = dict(partition='9', policy=POLICIES[0])
self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
with mock.patch(
'swift.obj.ssync_sender.SsyncBufferedHTTPConnection'
) as mock_conn_class:
mock_conn = mock_conn_class.return_value
mock_resp = mock.MagicMock()
mock_resp.status = 200
mock_conn.getresponse.return_value = mock_resp
self.sender.connect()
mock_conn_class.assert_called_once_with('1.2.3.4:5678')
expectations = {
'putrequest': [
mock.call('SSYNC', '/sda1/9'),
],
'putheader': [
mock.call('Transfer-Encoding', 'chunked'),
mock.call('X-Backend-Storage-Policy-Index', 0),
],
'endheaders': [mock.call()],
}
for method_name, expected_calls in expectations.items():
mock_method = getattr(mock_conn, method_name)
self.assertEqual(expected_calls, mock_method.mock_calls,
'connection method "%s" got %r not %r' % (
method_name, mock_method.mock_calls,
expected_calls))
def test_connect_handoff_none_frag(self):
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1')
job = dict(partition='9', policy=POLICIES[1], frag_index=None)
self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
with mock.patch(
'swift.obj.ssync_sender.SsyncBufferedHTTPConnection'
) as mock_conn_class:
mock_conn = mock_conn_class.return_value
mock_resp = mock.MagicMock()
mock_resp.status = 200
mock_conn.getresponse.return_value = mock_resp
self.sender.connect()
mock_conn_class.assert_called_once_with('1.2.3.4:5678')
expectations = {
'putrequest': [
mock.call('SSYNC', '/sda1/9'),
],
'putheader': [
mock.call('Transfer-Encoding', 'chunked'),
mock.call('X-Backend-Storage-Policy-Index', 1),
],
'endheaders': [mock.call()],
}
for method_name, expected_calls in expectations.items():
mock_method = getattr(mock_conn, method_name)
self.assertEqual(expected_calls, mock_method.mock_calls,
'connection method "%s" got %r not %r' % (
method_name, mock_method.mock_calls,
expected_calls))
def test_connect_handoff_none_frag_to_primary(self):
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1', backend_index=42)
job = dict(partition='9', policy=POLICIES[1], frag_index=None)
self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
with mock.patch(
'swift.obj.ssync_sender.SsyncBufferedHTTPConnection'
) as mock_conn_class:
mock_conn = mock_conn_class.return_value
mock_resp = mock.MagicMock()
mock_resp.status = 200
mock_conn.getresponse.return_value = mock_resp
self.sender.connect()
mock_conn_class.assert_called_once_with('1.2.3.4:5678')
expectations = {
'putrequest': [
mock.call('SSYNC', '/sda1/9'),
],
'putheader': [
mock.call('Transfer-Encoding', 'chunked'),
mock.call('X-Backend-Storage-Policy-Index', 1),
mock.call('X-Backend-Ssync-Frag-Index', 42),
mock.call('X-Backend-Ssync-Node-Index', 42),
],
'endheaders': [mock.call()],
}
for method_name, expected_calls in expectations.items():
mock_method = getattr(mock_conn, method_name)
self.assertEqual(expected_calls, mock_method.mock_calls,
'connection method "%s" got %r not %r' % (
method_name, mock_method.mock_calls,
expected_calls))
def test_connect_handoff_replicated(self):
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1')
# no frag_index in rsync job
job = dict(partition='9', policy=POLICIES[1])
self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
with mock.patch(
'swift.obj.ssync_sender.SsyncBufferedHTTPConnection'
) as mock_conn_class:
mock_conn = mock_conn_class.return_value
mock_resp = mock.MagicMock()
mock_resp.status = 200
mock_conn.getresponse.return_value = mock_resp
self.sender.connect()
mock_conn_class.assert_called_once_with('1.2.3.4:5678')
expectations = {
'putrequest': [
mock.call('SSYNC', '/sda1/9'),
],
'putheader': [
mock.call('Transfer-Encoding', 'chunked'),
mock.call('X-Backend-Storage-Policy-Index', 1),
],
'endheaders': [mock.call()],
}
for method_name, expected_calls in expectations.items():
mock_method = getattr(mock_conn, method_name)
self.assertEqual(expected_calls, mock_method.mock_calls,
'connection method "%s" got %r not %r' % (
method_name, mock_method.mock_calls,
expected_calls))
def _do_test_connect_include_non_durable(self,
include_non_durable,
resp_headers):
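# a receiver advertises that it accepts non-durable fragments via the
# 'x-backend-accept-no-commit' header in its connect response; without it
# the sender must drop its include_non_durable flag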
# construct sender and make connect call
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1', backend_index=0)
job = dict(partition='9', policy=POLICIES[1])
sender = ssync_sender.Sender(self.daemon, node, job, None,
include_non_durable=include_non_durable)
self.assertEqual(include_non_durable, sender.include_non_durable)
with mock.patch(
'swift.obj.ssync_sender.SsyncBufferedHTTPConnection'
) as mock_conn_class:
mock_conn = mock_conn_class.return_value
mock_conn.getresponse.return_value = FakeResponse('', resp_headers)
sender.connect()
mock_conn_class.assert_called_once_with('1.2.3.4:5678')
return sender
def test_connect_legacy_receiver(self):
sender = self._do_test_connect_include_non_durable(False, {})
self.assertFalse(sender.include_non_durable)
warnings = self.daemon_logger.get_lines_for_level('warning')
self.assertEqual([], warnings)
def test_connect_upgraded_receiver(self):
resp_hdrs = {'x-backend-accept-no-commit': 'True'}
sender = self._do_test_connect_include_non_durable(False, resp_hdrs)
# 'x-backend-accept-no-commit' in response does not override
# sender.include_non_durable
self.assertFalse(sender.include_non_durable)
warnings = self.daemon_logger.get_lines_for_level('warning')
self.assertEqual([], warnings)
def test_connect_legacy_receiver_include_non_durable(self):
sender = self._do_test_connect_include_non_durable(True, {})
# no 'x-backend-accept-no-commit' in response,
# sender.include_non_durable has been overridden
self.assertFalse(sender.include_non_durable)
warnings = self.daemon_logger.get_lines_for_level('warning')
self.assertEqual(['ssync receiver 1.2.3.4:5678 does not accept '
'non-durable fragments'], warnings)
def test_connect_upgraded_receiver_include_non_durable(self):
resp_hdrs = {'x-backend-accept-no-commit': 'True'}
sender = self._do_test_connect_include_non_durable(True, resp_hdrs)
self.assertTrue(sender.include_non_durable)
warnings = self.daemon_logger.get_lines_for_level('warning')
self.assertEqual([], warnings)
def test_call(self):
def patch_sender(sender, available_map, send_map):
connection = FakeConnection()
response = FakeResponse()
sender.connect = mock.MagicMock(return_value=(connection,
response))
sender.missing_check = mock.MagicMock()
sender.missing_check = mock.MagicMock(return_value=(available_map,
send_map))
sender.updates = mock.MagicMock()
sender.disconnect = mock.MagicMock()
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1')
job = {
'device': 'dev',
'partition': '9',
'policy': POLICIES.legacy,
'frag_index': 0,
}
available_map = dict([('9d41d8cd98f00b204e9800998ecf0abc',
'1380144470.00000'),
('9d41d8cd98f00b204e9800998ecf0def',
'1380144472.22222'),
('9d41d8cd98f00b204e9800998ecf1def',
'1380144474.44444')])
# no suffixes -> no work done
sender = ssync_sender.Sender(
self.daemon, node, job, [], remote_check_objs=None)
patch_sender(sender, available_map, {})
success, candidates = sender()
self.assertTrue(success)
self.assertEqual({}, candidates)
# all objs in sync
sender = ssync_sender.Sender(
self.daemon, node, job, ['ignored'], remote_check_objs=None)
patch_sender(sender, available_map, {})
success, candidates = sender()
self.assertTrue(success)
self.assertEqual(available_map, candidates)
# one obj not in sync, sync'ing faked, all objs should be in return set
wanted = '9d41d8cd98f00b204e9800998ecf0def'
sender = ssync_sender.Sender(
self.daemon, node, job, ['ignored'],
remote_check_objs=None)
patch_sender(sender, available_map, {wanted: []})
success, candidates = sender()
self.assertTrue(success)
self.assertEqual(available_map, candidates)
# one obj not in sync, remote check only so that obj is not sync'd
# and should not be in the return set
wanted = '9d41d8cd98f00b204e9800998ecf0def'
remote_check_objs = set(available_map.keys())
sender = ssync_sender.Sender(
self.daemon, node, job, ['ignored'],
remote_check_objs=remote_check_objs)
patch_sender(sender, available_map, {wanted: []})
success, candidates = sender()
self.assertTrue(success)
expected_map = dict([('9d41d8cd98f00b204e9800998ecf0abc',
'1380144470.00000'),
('9d41d8cd98f00b204e9800998ecf1def',
'1380144474.44444')])
self.assertEqual(expected_map, candidates)
def test_call_and_missing_check_metadata_legacy_response(self):
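# a legacy receiver replies with bare object hashes (no sync-type hints),
# so the sender falls back to sending only a PUT, never a POST, even
# though the local object also has newer metadata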
def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
if device == 'dev' and partition == '9' and suffixes == ['abc'] \
and policy == POLICIES.legacy:
yield (
'9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000),
'ts_meta': Timestamp(1380155570.00005)})
else:
raise Exception(
'No match for %r %r %r' % (device, partition, suffixes))
connection = FakeConnection()
self.sender.node = {}
self.sender.job = {
'device': 'dev',
'partition': '9',
'policy': POLICIES.legacy,
'frag_index': 0,
}
self.sender.suffixes = ['abc']
response = FakeResponse(
chunk_body=(
':MISSING_CHECK: START\r\n'
'9d41d8cd98f00b204e9800998ecf0abc\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
':UPDATES: END\r\n'
))
self.sender.df_mgr.yield_hashes = yield_hashes
self.sender.connect = mock.MagicMock(return_value=(connection,
response))
df = mock.MagicMock()
df.content_length = 0
self.sender.df_mgr.get_diskfile_from_hash = mock.MagicMock(
return_value=df)
self.sender.disconnect = mock.MagicMock()
success, candidates = self.sender()
self.assertTrue(success)
found_post = found_put = False
for chunk in connection.sent:
if b'POST' in chunk:
found_post = True
if b'PUT' in chunk:
found_put = True
self.assertFalse(found_post)
self.assertTrue(found_put)
def test_call_and_missing_check(self):
def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
if device == 'dev' and partition == '9' and suffixes == ['abc'] \
and policy == POLICIES.legacy:
yield (
'9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})
else:
raise Exception(
'No match for %r %r %r' % (device, partition, suffixes))
connection = FakeConnection()
self.sender.node = {}
self.sender.job = {
'device': 'dev',
'partition': '9',
'policy': POLICIES.legacy,
'frag_index': 0,
}
self.sender.suffixes = ['abc']
response = FakeResponse(
chunk_body=(
':MISSING_CHECK: START\r\n'
'9d41d8cd98f00b204e9800998ecf0abc d\r\n'
':MISSING_CHECK: END\r\n'))
self.sender.df_mgr.yield_hashes = yield_hashes
self.sender.connect = mock.MagicMock(return_value=(connection,
response))
self.sender.updates = mock.MagicMock()
self.sender.disconnect = mock.MagicMock()
success, candidates = self.sender()
self.assertTrue(success)
self.assertEqual(candidates,
dict([('9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})]))
def test_call_and_missing_check_with_obj_list(self):
def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
if device == 'dev' and partition == '9' and suffixes == ['abc'] \
and policy == POLICIES.legacy:
yield (
'9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})
else:
raise Exception(
'No match for %r %r %r' % (device, partition, suffixes))
job = {
'device': 'dev',
'partition': '9',
'policy': POLICIES.legacy,
'frag_index': 0,
}
self.sender = ssync_sender.Sender(self.daemon, None, job, ['abc'],
['9d41d8cd98f00b204e9800998ecf0abc'])
connection = FakeConnection()
response = FakeResponse(
chunk_body=(
':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'))
self.sender.df_mgr.yield_hashes = yield_hashes
self.sender.connect = mock.MagicMock(return_value=(connection,
response))
self.sender.updates = mock.MagicMock()
self.sender.disconnect = mock.MagicMock()
success, candidates = self.sender()
self.assertTrue(success)
self.assertEqual(candidates,
dict([('9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})]))
def test_call_and_missing_check_with_obj_list_but_required(self):
def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
if device == 'dev' and partition == '9' and suffixes == ['abc'] \
and policy == POLICIES.legacy:
yield (
'9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})
else:
raise Exception(
'No match for %r %r %r' % (device, partition, suffixes))
job = {
'device': 'dev',
'partition': '9',
'policy': POLICIES.legacy,
'frag_index': 0,
}
self.sender = ssync_sender.Sender(self.daemon, {}, job, ['abc'],
['9d41d8cd98f00b204e9800998ecf0abc'])
connection = FakeConnection()
response = FakeResponse(
chunk_body=(
':MISSING_CHECK: START\r\n'
'9d41d8cd98f00b204e9800998ecf0abc d\r\n'
':MISSING_CHECK: END\r\n'))
self.sender.df_mgr.yield_hashes = yield_hashes
self.sender.connect = mock.MagicMock(return_value=(connection,
response))
self.sender.updates = mock.MagicMock()
self.sender.disconnect = mock.MagicMock()
success, candidates = self.sender()
self.assertTrue(success)
self.assertEqual(candidates, {})
def test_connect_send_timeout(self):
self.daemon.node_timeout = 0.01 # make disconnect fail fast
self.daemon.conn_timeout = 0.01
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1')
job = dict(partition='9', policy=POLICIES.legacy)
self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
def putrequest(*args, **kwargs):
eventlet.sleep(0.1)
with mock.patch.object(
ssync_sender.bufferedhttp.BufferedHTTPConnection,
'putrequest', putrequest):
success, candidates = self.sender()
self.assertFalse(success)
self.assertEqual(candidates, {})
error_lines = self.daemon_logger.get_lines_for_level('error')
for line in error_lines:
self.assertTrue(line.startswith(
'1.2.3.4:5678/sda1/9 0.01 seconds: connect send'))
def test_connect_receive_timeout(self):
self.daemon.node_timeout = 0.02
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1', index=0)
job = dict(partition='9', policy=POLICIES.legacy)
self.sender = ssync_sender.Sender(self.daemon, node, job, None)
self.sender.suffixes = ['abc']
class FakeBufferedHTTPConnection(NullBufferedHTTPConnection):
def getresponse(*args, **kwargs):
eventlet.sleep(0.1)
with mock.patch.object(
ssync_sender, 'SsyncBufferedHTTPConnection',
FakeBufferedHTTPConnection):
success, candidates = self.sender()
self.assertFalse(success)
self.assertEqual(candidates, {})
error_lines = self.daemon_logger.get_lines_for_level('error')
for line in error_lines:
self.assertTrue(line.startswith(
'1.2.3.4:5678/sda1/9 0.02 seconds: connect receive'))
def test_connect_bad_status(self):
self.daemon.node_timeout = 0.02
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1', index=0)
job = dict(partition='9', policy=POLICIES.legacy)
class FakeBufferedHTTPConnection(NullBufferedHTTPConnection):
def getresponse(*args, **kwargs):
response = FakeResponse()
response.status = 503
response.read = lambda: 'an error message'
return response
missing_check_fn = 'swift.obj.ssync_sender.Sender.missing_check'
with mock.patch(missing_check_fn) as mock_missing_check:
with mock.patch.object(
ssync_sender, 'SsyncBufferedHTTPConnection',
FakeBufferedHTTPConnection):
self.sender = ssync_sender.Sender(
self.daemon, node, job, ['abc'])
success, candidates = self.sender()
self.assertFalse(success)
self.assertEqual(candidates, {})
error_lines = self.daemon_logger.get_lines_for_level('error')
for line in error_lines:
self.assertTrue(line.startswith(
'1.2.3.4:5678/sda1/9 Expected status 200; got 503'))
self.assertIn('an error message', line)
# sanity check that Sender did not proceed to missing_check exchange
self.assertFalse(mock_missing_check.called)
def test_readline_newline_in_buffer(self):
response = FakeResponse()
response.ssync_response_buffer = b'Has a newline already.\r\nOkay.'
self.assertEqual(response.readline(), b'Has a newline already.\r\n')
self.assertEqual(response.ssync_response_buffer, b'Okay.')
def test_readline_buffer_exceeds_network_chunk_size_somehow(self):
response = FakeResponse()
response.ssync_response_buffer = b'1234567890'
self.assertEqual(response.readline(size=2), b'1234567890')
self.assertEqual(response.ssync_response_buffer, b'')
def test_readline_at_start_of_chunk(self):
response = FakeResponse()
response.fp = io.BytesIO(b'2\r\nx\n\r\n')
self.assertEqual(response.readline(), b'x\n')
def test_readline_chunk_with_extension(self):
response = FakeResponse()
response.fp = io.BytesIO(
b'2 ; chunk=extension\r\nx\n\r\n')
self.assertEqual(response.readline(), b'x\n')
def test_readline_broken_chunk(self):
response = FakeResponse()
response.fp = io.BytesIO(b'q\r\nx\n\r\n')
self.assertRaises(
exceptions.ReplicationException, response.readline)
self.assertTrue(response.close_called)
def test_readline_terminated_chunk(self):
response = FakeResponse()
response.fp = io.BytesIO(b'b\r\nnot enough')
self.assertRaises(
exceptions.ReplicationException, response.readline)
self.assertTrue(response.close_called)
def test_readline_all(self):
response = FakeResponse()
response.fp = io.BytesIO(b'2\r\nx\n\r\n0\r\n\r\n')
self.assertEqual(response.readline(), b'x\n')
self.assertEqual(response.readline(), b'')
self.assertEqual(response.readline(), b'')
def test_readline_all_trailing_not_newline_termed(self):
response = FakeResponse()
response.fp = io.BytesIO(
b'2\r\nx\n\r\n3\r\n123\r\n0\r\n\r\n')
self.assertEqual(response.readline(), b'x\n')
self.assertEqual(response.readline(), b'123')
self.assertEqual(response.readline(), b'')
self.assertEqual(response.readline(), b'')
def test_missing_check_timeout_start(self):
connection = FakeConnection()
response = FakeResponse()
self.sender.daemon.node_timeout = 0.01
self.assertFalse(self.sender.limited_by_max_objects)
with mock.patch.object(connection, 'send',
side_effect=lambda *args: eventlet.sleep(1)):
with self.assertRaises(exceptions.MessageTimeout) as cm:
self.sender.missing_check(connection, response)
self.assertIn('0.01 seconds: missing_check start', str(cm.exception))
self.assertFalse(self.sender.limited_by_max_objects)
def test_missing_check_timeout_send_line(self):
def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
yield (
'9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})
yield (
'9d41d8cd98f00b204e9800998ecf0def',
{'ts_data': Timestamp(1380144471.00000)})
connection = FakeConnection()
response = FakeResponse()
# max_objects unlimited
self.sender = ssync_sender.Sender(self.daemon, None, self.job, None,
max_objects=0)
self.sender.daemon.node_timeout = 0.01
self.sender.df_mgr.yield_hashes = yield_hashes
self.assertFalse(self.sender.limited_by_max_objects)
sleeps = [0, 0, 1]
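# the first two sends (the START marker and the first hash line) succeed;
# the third send blocks long enough to trip the 0.01s node_timeout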
with mock.patch.object(
connection, 'send',
side_effect=lambda *args: eventlet.sleep(sleeps.pop(0))):
with self.assertRaises(exceptions.MessageTimeout) as cm:
self.sender.missing_check(connection, response)
self.assertIn('0.01 seconds: missing_check send line: '
'1 lines (57 bytes) sent', str(cm.exception))
self.assertFalse(self.sender.limited_by_max_objects)
def test_missing_check_has_empty_suffixes(self):
def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
if (device != 'dev' or partition != '9' or
policy != POLICIES.legacy or
suffixes != ['abc', 'def']):
yield # Just here to make this a generator
raise Exception(
'No match for %r %r %r %r' % (device, partition,
policy, suffixes))
connection = FakeConnection()
self.sender.job = {
'device': 'dev',
'partition': '9',
'policy': POLICIES.legacy,
}
self.sender.suffixes = ['abc', 'def']
response = FakeResponse(
chunk_body=(
':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'))
self.sender.df_mgr.yield_hashes = yield_hashes
self.assertFalse(self.sender.limited_by_max_objects)
available_map, send_map = self.sender.missing_check(connection,
response)
self.assertEqual(
b''.join(connection.sent),
b'17\r\n:MISSING_CHECK: START\r\n\r\n'
b'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertEqual(send_map, {})
self.assertEqual(available_map, {})
self.assertFalse(self.sender.limited_by_max_objects)
def test_missing_check_has_suffixes(self):
def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
if (device == 'dev' and partition == '9' and
policy == POLICIES.legacy and
suffixes == ['abc', 'def']):
yield (
'9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})
yield (
'9d41d8cd98f00b204e9800998ecf0def',
{'ts_data': Timestamp(1380144472.22222),
'ts_meta': Timestamp(1380144473.22222)})
yield (
'9d41d8cd98f00b204e9800998ecf1def',
{'ts_data': Timestamp(1380144474.44444),
'ts_ctype': Timestamp(1380144474.44448),
'ts_meta': Timestamp(1380144475.44444)})
else:
raise Exception(
'No match for %r %r %r %r' % (device, partition,
policy, suffixes))
# note: max_objects > number that would yield
self.sender = ssync_sender.Sender(self.daemon, None, self.job, None,
max_objects=4)
connection = FakeConnection()
self.sender.job = {
'device': 'dev',
'partition': '9',
'policy': POLICIES.legacy,
}
self.sender.suffixes = ['abc', 'def']
response = FakeResponse(
chunk_body=(
':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'))
self.sender.df_mgr.yield_hashes = yield_hashes
self.assertFalse(self.sender.limited_by_max_objects)
available_map, send_map = self.sender.missing_check(connection,
response)
self.assertEqual(
b''.join(connection.sent),
b'17\r\n:MISSING_CHECK: START\r\n\r\n'
b'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
b'3b\r\n9d41d8cd98f00b204e9800998ecf0def 1380144472.22222 '
b'm:186a0\r\n\r\n'
b'3f\r\n9d41d8cd98f00b204e9800998ecf1def 1380144474.44444 '
b'm:186a0,t:4\r\n\r\n'
b'15\r\n:MISSING_CHECK: END\r\n\r\n')
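        # A reading of the expected bytes above (an explanatory note, not
        # asserted anywhere): each send() is one chunked-transfer frame of
        # the form '<hex length>\r\n<payload>\r\n', and each missing_check
        # payload is '<hash> <ts_data>' optionally followed by
        # 'm:<hex delta>' and ',t:<hex delta>'. Assuming Timestamp.raw counts
        # 10 microsecond units, 'm:186a0' (0x186a0 == 100000) is the 1 second
        # gap between ts_meta and ts_data, and 't:4' is the 0.00004 second
        # gap between ts_ctype and ts_data.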
self.assertEqual(send_map, {})
candidates = [('9d41d8cd98f00b204e9800998ecf0abc',
dict(ts_data=Timestamp(1380144470.00000))),
('9d41d8cd98f00b204e9800998ecf0def',
dict(ts_data=Timestamp(1380144472.22222),
ts_meta=Timestamp(1380144473.22222))),
('9d41d8cd98f00b204e9800998ecf1def',
dict(ts_data=Timestamp(1380144474.44444),
ts_meta=Timestamp(1380144475.44444),
ts_ctype=Timestamp(1380144474.44448)))]
self.assertEqual(available_map, dict(candidates))
self.assertEqual([], self.daemon_logger.get_lines_for_level('info'))
self.assertFalse(self.sender.limited_by_max_objects)
def test_missing_check_max_objects_less_than_actual_objects(self):
def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
# verify missing_check stops after 2 objects even though more
# objects would yield
if (device == 'dev' and partition == '9' and
policy == POLICIES.legacy and
suffixes == ['abc', 'def']):
yield (
'9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})
yield (
'9d41d8cd98f00b204e9800998ecf0def',
{'ts_data': Timestamp(1380144472.22222),
'ts_meta': Timestamp(1380144473.22222)})
yield (
'9d41d8cd98f00b204e9800998ecf1def',
{'ts_data': Timestamp(1380144474.44444),
'ts_ctype': Timestamp(1380144474.44448),
'ts_meta': Timestamp(1380144475.44444)})
else:
raise Exception(
'No match for %r %r %r %r' % (device, partition,
policy, suffixes))
# max_objects < number that would yield
self.sender = ssync_sender.Sender(self.daemon, None, self.job, None,
max_objects=2)
connection = FakeConnection()
self.sender.job = {
'device': 'dev',
'partition': '9',
'policy': POLICIES.legacy,
}
self.sender.suffixes = ['abc', 'def']
response = FakeResponse(
chunk_body=(
':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'))
self.sender.df_mgr.yield_hashes = yield_hashes
self.assertFalse(self.sender.limited_by_max_objects)
available_map, send_map = self.sender.missing_check(connection,
response)
self.assertEqual(
b''.join(connection.sent),
b'17\r\n:MISSING_CHECK: START\r\n\r\n'
b'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
b'3b\r\n9d41d8cd98f00b204e9800998ecf0def 1380144472.22222 '
b'm:186a0\r\n\r\n'
b'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertEqual(send_map, {})
candidates = [('9d41d8cd98f00b204e9800998ecf0abc',
dict(ts_data=Timestamp(1380144470.00000))),
('9d41d8cd98f00b204e9800998ecf0def',
dict(ts_data=Timestamp(1380144472.22222),
ts_meta=Timestamp(1380144473.22222)))]
self.assertEqual(available_map, dict(candidates))
self.assertEqual(
['ssync missing_check truncated after 2 objects: device: dev, '
'part: 9, policy: 0, last object hash: '
'9d41d8cd98f00b204e9800998ecf0def'],
self.daemon_logger.get_lines_for_level('info'))
self.assertTrue(self.sender.limited_by_max_objects)
def test_missing_check_max_objects_exactly_actual_objects(self):
def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
if (device == 'dev' and partition == '9' and
policy == POLICIES.legacy and
suffixes == ['abc', 'def']):
yield (
'9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})
yield (
'9d41d8cd98f00b204e9800998ecf0def',
{'ts_data': Timestamp(1380144472.22222),
'ts_meta': Timestamp(1380144473.22222)})
else:
raise Exception(
'No match for %r %r %r %r' % (device, partition,
policy, suffixes))
# max_objects == number that would yield
self.sender = ssync_sender.Sender(self.daemon, None, self.job, None,
max_objects=2)
connection = FakeConnection()
self.sender.job = {
'device': 'dev',
'partition': '9',
'policy': POLICIES.legacy,
}
self.sender.suffixes = ['abc', 'def']
response = FakeResponse(
chunk_body=(
':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'))
self.sender.df_mgr.yield_hashes = yield_hashes
self.assertFalse(self.sender.limited_by_max_objects)
available_map, send_map = self.sender.missing_check(connection,
response)
self.assertEqual(
b''.join(connection.sent),
b'17\r\n:MISSING_CHECK: START\r\n\r\n'
b'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
b'3b\r\n9d41d8cd98f00b204e9800998ecf0def 1380144472.22222 '
b'm:186a0\r\n\r\n'
b'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertEqual(send_map, {})
candidates = [('9d41d8cd98f00b204e9800998ecf0abc',
dict(ts_data=Timestamp(1380144470.00000))),
('9d41d8cd98f00b204e9800998ecf0def',
dict(ts_data=Timestamp(1380144472.22222),
ts_meta=Timestamp(1380144473.22222)))]
self.assertEqual(available_map, dict(candidates))
# nothing logged re: truncation
self.assertEqual([], self.daemon_logger.get_lines_for_level('info'))
self.assertFalse(self.sender.limited_by_max_objects)
def test_missing_check_far_end_disconnect(self):
def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
if (device == 'dev' and partition == '9' and
policy == POLICIES.legacy and
suffixes == ['abc']):
yield (
'9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})
else:
raise Exception(
'No match for %r %r %r %r' % (device, partition,
policy, suffixes))
connection = FakeConnection()
self.sender.job = {
'device': 'dev',
'partition': '9',
'policy': POLICIES.legacy,
}
self.sender.suffixes = ['abc']
self.sender.df_mgr.yield_hashes = yield_hashes
self.assertFalse(self.sender.limited_by_max_objects)
response = FakeResponse(chunk_body='\r\n')
exc = None
try:
self.sender.missing_check(connection, response)
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), 'Early disconnect')
self.assertEqual(
b''.join(connection.sent),
b'17\r\n:MISSING_CHECK: START\r\n\r\n'
b'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
b'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertFalse(self.sender.limited_by_max_objects)
def test_missing_check_far_end_disconnect2(self):
def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
if (device == 'dev' and partition == '9' and
policy == POLICIES.legacy and
suffixes == ['abc']):
yield (
'9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})
else:
raise Exception(
'No match for %r %r %r %r' % (device, partition,
policy, suffixes))
connection = FakeConnection()
self.sender.job = {
'device': 'dev',
'partition': '9',
'policy': POLICIES.legacy,
}
self.sender.suffixes = ['abc']
self.sender.df_mgr.yield_hashes = yield_hashes
self.assertFalse(self.sender.limited_by_max_objects)
response = FakeResponse(
chunk_body=':MISSING_CHECK: START\r\n')
exc = None
try:
self.sender.missing_check(connection, response)
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), 'Early disconnect')
self.assertEqual(
b''.join(connection.sent),
b'17\r\n:MISSING_CHECK: START\r\n\r\n'
b'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
b'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertFalse(self.sender.limited_by_max_objects)
def test_missing_check_far_end_unexpected(self):
def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
if (device == 'dev' and partition == '9' and
policy == POLICIES.legacy and
suffixes == ['abc']):
yield (
'9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})
else:
raise Exception(
'No match for %r %r %r %r' % (device, partition,
policy, suffixes))
connection = FakeConnection()
self.sender.job = {
'device': 'dev',
'partition': '9',
'policy': POLICIES.legacy,
}
self.sender.suffixes = ['abc']
self.sender.df_mgr.yield_hashes = yield_hashes
self.assertFalse(self.sender.limited_by_max_objects)
response = FakeResponse(chunk_body='OH HAI\r\n')
exc = None
try:
self.sender.missing_check(connection, response)
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), "Unexpected response: 'OH HAI'")
self.assertEqual(
b''.join(connection.sent),
b'17\r\n:MISSING_CHECK: START\r\n\r\n'
b'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
b'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertFalse(self.sender.limited_by_max_objects)
def test_missing_check_send_map(self):
def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
if (device == 'dev' and partition == '9' and
policy == POLICIES.legacy and
suffixes == ['abc']):
yield (
'9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})
else:
raise Exception(
'No match for %r %r %r %r' % (device, partition,
policy, suffixes))
connection = FakeConnection()
self.sender.job = {
'device': 'dev',
'partition': '9',
'policy': POLICIES.legacy,
}
self.sender.suffixes = ['abc']
response = FakeResponse(
chunk_body=(
':MISSING_CHECK: START\r\n'
'0123abc dm\r\n'
':MISSING_CHECK: END\r\n'))
self.sender.df_mgr.yield_hashes = yield_hashes
self.assertFalse(self.sender.limited_by_max_objects)
available_map, send_map = self.sender.missing_check(connection,
response)
self.assertEqual(
b''.join(connection.sent),
b'17\r\n:MISSING_CHECK: START\r\n\r\n'
b'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
b'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertEqual(send_map, {'0123abc': {'data': True, 'meta': True}})
self.assertEqual(available_map,
dict([('9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})]))
self.assertFalse(self.sender.limited_by_max_objects)
def test_missing_check_extra_line_parts(self):
# check that sender tolerates extra parts in missing check
# line responses to allow for protocol upgrades
def yield_hashes(device, partition, policy, suffixes=None, **kwargs):
if (device == 'dev' and partition == '9' and
policy == POLICIES.legacy and
suffixes == ['abc']):
yield (
'9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})
else:
raise Exception(
'No match for %r %r %r %r' % (device, partition,
policy, suffixes))
connection = FakeConnection()
self.sender.job = {
'device': 'dev',
'partition': '9',
'policy': POLICIES.legacy,
}
self.sender.suffixes = ['abc']
response = FakeResponse(
chunk_body=(
':MISSING_CHECK: START\r\n'
'0123abc d extra response parts\r\n'
':MISSING_CHECK: END\r\n'))
self.sender.df_mgr.yield_hashes = yield_hashes
self.assertFalse(self.sender.limited_by_max_objects)
available_map, send_map = self.sender.missing_check(connection,
response)
self.assertEqual(send_map, {'0123abc': {'data': True}})
self.assertEqual(available_map,
dict([('9d41d8cd98f00b204e9800998ecf0abc',
{'ts_data': Timestamp(1380144470.00000)})]))
self.assertFalse(self.sender.limited_by_max_objects)
def test_updates_timeout(self):
connection = FakeConnection()
connection.send = lambda d: eventlet.sleep(1)
response = FakeResponse()
self.sender.daemon.node_timeout = 0.01
self.assertRaises(exceptions.MessageTimeout, self.sender.updates,
connection, response, {})
def test_updates_empty_send_map(self):
connection = FakeConnection()
response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
self.sender.updates(connection, response, {})
self.assertEqual(
b''.join(connection.sent),
b'11\r\n:UPDATES: START\r\n\r\n'
b'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_unexpected_response_lines1(self):
connection = FakeConnection()
response = FakeResponse(
chunk_body=(
'abc\r\n'
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
exc = None
try:
self.sender.updates(connection, response, {})
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), "Unexpected response: 'abc'")
self.assertEqual(
b''.join(connection.sent),
b'11\r\n:UPDATES: START\r\n\r\n'
b'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_unexpected_response_lines2(self):
connection = FakeConnection()
response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
'abc\r\n'
':UPDATES: END\r\n'))
exc = None
try:
self.sender.updates(connection, response, {})
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), "Unexpected response: 'abc'")
self.assertEqual(
b''.join(connection.sent),
b'11\r\n:UPDATES: START\r\n\r\n'
b'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_is_deleted(self):
device = 'dev'
part = '9'
object_parts = ('a', 'c', 'o')
df = self._make_open_diskfile(device, part, *object_parts)
object_hash = utils.hash_path(*object_parts)
delete_timestamp = utils.normalize_timestamp(time.time())
df.delete(delete_timestamp)
connection = FakeConnection()
self.sender.job = {
'device': device,
'partition': part,
'policy': POLICIES.legacy,
'frag_index': 0,
}
self.sender.node = {}
send_map = {object_hash: {'data': True}}
self.sender.send_delete = mock.MagicMock()
self.sender.send_put = mock.MagicMock()
response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
self.sender.updates(connection, response, send_map)
self.sender.send_delete.assert_called_once_with(
connection, '/a/c/o', delete_timestamp)
self.assertEqual(self.sender.send_put.mock_calls, [])
# note that the delete line isn't actually sent since we mock
# send_delete; send_delete is tested separately.
self.assertEqual(
b''.join(connection.sent),
b'11\r\n:UPDATES: START\r\n\r\n'
b'f\r\n:UPDATES: END\r\n\r\n')
def test_update_send_delete(self):
device = 'dev'
part = '9'
object_parts = ('a', 'c', 'o')
df = self._make_open_diskfile(device, part, *object_parts)
object_hash = utils.hash_path(*object_parts)
delete_timestamp = utils.normalize_timestamp(time.time())
df.delete(delete_timestamp)
connection = FakeConnection()
self.sender.job = {
'device': device,
'partition': part,
'policy': POLICIES.legacy,
'frag_index': 0,
}
self.sender.node = {}
send_map = {object_hash: {'data': True}}
response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
self.sender.updates(connection, response, send_map)
self.assertEqual(
b''.join(connection.sent),
b'11\r\n:UPDATES: START\r\n\r\n'
b'30\r\n'
b'DELETE /a/c/o\r\n'
b'X-Timestamp: %s\r\n\r\n\r\n'
b'f\r\n:UPDATES: END\r\n\r\n'
% delete_timestamp.encode('ascii')
)
def test_updates_put(self):
# sender has data file and meta file
ts_iter = make_timestamp_iter()
device = 'dev'
part = '9'
object_parts = ('a', 'c', 'o')
t1 = next(ts_iter)
df = self._make_open_diskfile(
device, part, *object_parts, timestamp=t1)
t2 = next(ts_iter)
metadata = {'X-Timestamp': t2.internal, 'X-Object-Meta-Fruit': 'kiwi'}
df.write_metadata(metadata)
object_hash = utils.hash_path(*object_parts)
df.open()
expected = df.get_metadata()
connection = FakeConnection()
self.sender.job = {
'device': device,
'partition': part,
'policy': POLICIES.legacy,
'frag_index': 0,
}
self.sender.node = {}
# receiver requested data only
send_map = {object_hash: {'data': True}}
self.sender.send_delete = mock.MagicMock()
self.sender.send_put = mock.MagicMock()
self.sender.send_post = mock.MagicMock()
response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
self.sender.updates(connection, response, send_map)
self.assertEqual(self.sender.send_delete.mock_calls, [])
self.assertEqual(self.sender.send_post.mock_calls, [])
self.assertEqual(1, len(self.sender.send_put.mock_calls))
args, _kwargs = self.sender.send_put.call_args
connection, path, df = args
self.assertEqual(path, '/a/c/o')
self.assertTrue(isinstance(df, diskfile.DiskFile))
self.assertEqual(expected, df.get_metadata())
# note that the put line isn't actually sent since we mock send_put;
# send_put is tested separately.
self.assertEqual(
b''.join(connection.sent),
b'11\r\n:UPDATES: START\r\n\r\n'
b'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_post(self):
ts_iter = make_timestamp_iter()
device = 'dev'
part = '9'
object_parts = ('a', 'c', 'o')
t1 = next(ts_iter)
df = self._make_open_diskfile(
device, part, *object_parts, timestamp=t1)
t2 = next(ts_iter)
metadata = {'X-Timestamp': t2.internal, 'X-Object-Meta-Fruit': 'kiwi'}
df.write_metadata(metadata)
object_hash = utils.hash_path(*object_parts)
df.open()
expected = df.get_metadata()
connection = FakeConnection()
self.sender.job = {
'device': device,
'partition': part,
'policy': POLICIES.legacy,
'frag_index': 0,
}
self.sender.node = {}
# receiver requested only meta
send_map = {object_hash: {'meta': True}}
self.sender.send_delete = mock.MagicMock()
self.sender.send_put = mock.MagicMock()
self.sender.send_post = mock.MagicMock()
response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
self.sender.updates(connection, response, send_map)
self.assertEqual(self.sender.send_delete.mock_calls, [])
self.assertEqual(self.sender.send_put.mock_calls, [])
self.assertEqual(1, len(self.sender.send_post.mock_calls))
args, _kwargs = self.sender.send_post.call_args
connection, path, df = args
self.assertEqual(path, '/a/c/o')
self.assertIsInstance(df, diskfile.DiskFile)
self.assertEqual(expected, df.get_metadata())
# note that the post line isn't actually sent since we mock send_post;
# send_post is tested separately.
self.assertEqual(
b''.join(connection.sent),
b'11\r\n:UPDATES: START\r\n\r\n'
b'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_put_and_post(self):
ts_iter = make_timestamp_iter()
device = 'dev'
part = '9'
object_parts = ('a', 'c', 'o')
t1 = next(ts_iter)
df = self._make_open_diskfile(
device, part, *object_parts, timestamp=t1)
t2 = next(ts_iter)
metadata = {'X-Timestamp': t2.internal, 'X-Object-Meta-Fruit': 'kiwi'}
df.write_metadata(metadata)
object_hash = utils.hash_path(*object_parts)
df.open()
expected = df.get_metadata()
connection = FakeConnection()
self.sender.job = {
'device': device,
'partition': part,
'policy': POLICIES.legacy,
'frag_index': 0,
}
self.sender.node = {}
# receiver requested data and meta
send_map = {object_hash: {'meta': True, 'data': True}}
self.sender.send_delete = mock.MagicMock()
self.sender.send_put = mock.MagicMock()
self.sender.send_post = mock.MagicMock()
response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
self.sender.updates(connection, response, send_map)
self.assertEqual(self.sender.send_delete.mock_calls, [])
self.assertEqual(1, len(self.sender.send_put.mock_calls))
self.assertEqual(1, len(self.sender.send_post.mock_calls))
args, _kwargs = self.sender.send_put.call_args
connection, path, df = args
self.assertEqual(path, '/a/c/o')
self.assertIsInstance(df, diskfile.DiskFile)
self.assertEqual(expected, df.get_metadata())
args, _kwargs = self.sender.send_post.call_args
connection, path, df = args
self.assertEqual(path, '/a/c/o')
self.assertIsInstance(df, diskfile.DiskFile)
self.assertEqual(expected, df.get_metadata())
self.assertEqual(
b''.join(connection.sent),
b'11\r\n:UPDATES: START\r\n\r\n'
b'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_storage_policy_index(self):
device = 'dev'
part = '9'
object_parts = ('a', 'c', 'o')
df = self._make_open_diskfile(device, part, *object_parts,
policy=POLICIES[0])
object_hash = utils.hash_path(*object_parts)
expected = df.get_metadata()
connection = FakeConnection()
self.sender.job = {
'device': device,
'partition': part,
'policy': POLICIES[0],
'frag_index': 0}
self.sender.node = {}
send_map = {object_hash: {'data': True}}
self.sender.send_delete = mock.MagicMock()
self.sender.send_put = mock.MagicMock()
response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
self.sender.updates(connection, response, send_map)
args, _kwargs = self.sender.send_put.call_args
connection, path, df = args
self.assertEqual(path, '/a/c/o')
self.assertTrue(isinstance(df, diskfile.DiskFile))
self.assertEqual(expected, df.get_metadata())
self.assertEqual(os.path.join(self.tx_testdir, 'dev/objects/9/',
object_hash[-3:], object_hash),
df._datadir)
def test_updates_read_response_timeout_start(self):
connection = FakeConnection()
response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
orig_readline = response.readline
def delayed_readline(*args, **kwargs):
eventlet.sleep(1)
return orig_readline(*args, **kwargs)
response.readline = delayed_readline
self.sender.daemon.http_timeout = 0.01
self.assertRaises(exceptions.MessageTimeout, self.sender.updates,
connection, response, {})
def test_updates_read_response_disconnect_start(self):
connection = FakeConnection()
response = FakeResponse(chunk_body='\r\n')
exc = None
try:
self.sender.updates(connection, response, {})
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), 'Early disconnect')
self.assertEqual(
b''.join(connection.sent),
b'11\r\n:UPDATES: START\r\n\r\n'
b'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_read_response_unexp_start(self):
connection = FakeConnection()
response = FakeResponse(
chunk_body=(
'anything else\r\n'
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
exc = None
try:
self.sender.updates(connection, response, {})
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), "Unexpected response: 'anything else'")
self.assertEqual(
b''.join(connection.sent),
b'11\r\n:UPDATES: START\r\n\r\n'
b'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_read_response_timeout_end(self):
connection = FakeConnection()
response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
orig_readline = response.readline
def delayed_readline(*args, **kwargs):
rv = orig_readline(*args, **kwargs)
if rv == b':UPDATES: END\r\n':
eventlet.sleep(1)
return rv
response.readline = delayed_readline
self.sender.daemon.http_timeout = 0.01
self.assertRaises(exceptions.MessageTimeout, self.sender.updates,
connection, response, {})
def test_updates_read_response_disconnect_end(self):
connection = FakeConnection()
response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
'\r\n'))
exc = None
try:
self.sender.updates(connection, response, {})
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), 'Early disconnect')
self.assertEqual(
b''.join(connection.sent),
b'11\r\n:UPDATES: START\r\n\r\n'
b'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_read_response_unexp_end(self):
connection = FakeConnection()
response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
'anything else\r\n'
':UPDATES: END\r\n'))
exc = None
try:
self.sender.updates(connection, response, {})
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), "Unexpected response: 'anything else'")
self.assertEqual(
b''.join(connection.sent),
b'11\r\n:UPDATES: START\r\n\r\n'
b'f\r\n:UPDATES: END\r\n\r\n')
def test_send_delete_timeout(self):
connection = FakeConnection()
connection.send = lambda d: eventlet.sleep(1)
self.sender.daemon.node_timeout = 0.01
exc = None
try:
self.sender.send_delete(connection, '/a/c/o',
utils.Timestamp('1381679759.90941'))
except exceptions.MessageTimeout as err:
exc = err
self.assertEqual(str(exc), '0.01 seconds: send_delete')
def test_send_delete(self):
connection = FakeConnection()
self.sender.send_delete(connection, '/a/c/o',
utils.Timestamp('1381679759.90941'))
self.assertEqual(
b''.join(connection.sent),
b'30\r\n'
b'DELETE /a/c/o\r\n'
b'X-Timestamp: 1381679759.90941\r\n'
b'\r\n\r\n')
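        # A sketch of the framing above: '30' is the hex chunk length,
        # 0x30 == 48 bytes, i.e. 'DELETE /a/c/o\r\n' (15) + the X-Timestamp
        # header line (31) + the blank line ending the headers (2); the
        # trailing '\r\n' is the chunk terminator and is not counted.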
def test_send_put_initial_timeout(self):
df = self._make_open_diskfile()
df._disk_chunk_size = 2
connection = FakeConnection()
connection.send = lambda d: eventlet.sleep(1)
self.sender.daemon.node_timeout = 0.01
exc = None
try:
self.sender.send_put(connection, '/a/c/o', df)
except exceptions.MessageTimeout as err:
exc = err
self.assertEqual(str(exc), '0.01 seconds: send_put')
def test_send_put_chunk_timeout(self):
df = self._make_open_diskfile()
connection = FakeConnection()
self.sender.daemon.node_timeout = 0.01
one_shot = [None]
def mock_send(data):
try:
one_shot.pop()
except IndexError:
eventlet.sleep(1)
connection.send = mock_send
exc = None
try:
self.sender.send_put(connection, '/a/c/o', df)
except exceptions.MessageTimeout as err:
exc = err
self.assertEqual(str(exc), '0.01 seconds: send_put chunk')
def _check_send_put(self, obj_name, meta_value,
meta_name='Unicode-Meta-Name', durable=True):
ts_iter = make_timestamp_iter()
t1 = next(ts_iter)
body = b'test'
extra_metadata = {'Some-Other-Header': 'value',
meta_name: meta_value}
# Note that diskfile expects obj_name to be a native string
# but metadata to be wsgi strings
df = self._make_open_diskfile(obj=obj_name, body=body,
timestamp=t1,
extra_metadata=extra_metadata,
commit=durable)
expected = dict(df.get_metadata())
expected['body'] = body if six.PY2 else body.decode('ascii')
expected['chunk_size'] = len(body)
expected['meta'] = meta_value
expected['meta_name'] = meta_name
path = six.moves.urllib.parse.quote(expected['name'])
expected['path'] = path
no_commit = '' if durable else 'X-Backend-No-Commit: True\r\n'
expected['no_commit'] = no_commit
length = 128 + len(path) + len(meta_value) + len(no_commit) + \
len(meta_name)
expected['length'] = format(length, 'x')
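        # The 128 constant is, presumably, the byte count of the fixed parts
        # of the PUT subrequest asserted below, given the 4-byte body and the
        # 16-character internal timestamp:
        #   'PUT ' and its '\r\n'             6
        #   'Content-Length: 4\r\n'          19
        #   'ETag: <32 hex chars>\r\n'       40
        #   'Some-Other-Header: value\r\n'   26
        #   ': ' and '\r\n' around the meta   4
        #   'X-Timestamp: <16 chars>\r\n'    31
        #   blank line ending the headers     2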
# .meta file metadata is not included in expected for data only PUT
t2 = next(ts_iter)
metadata = {'X-Timestamp': t2.internal, 'X-Object-Meta-Fruit': 'kiwi'}
df.write_metadata(metadata)
df.open()
connection = FakeConnection()
self.sender.send_put(connection, path, df, durable=durable)
expected = (
'%(length)s\r\n'
'PUT %(path)s\r\n'
'Content-Length: %(Content-Length)s\r\n'
'ETag: %(ETag)s\r\n'
'Some-Other-Header: value\r\n'
'%(meta_name)s: %(meta)s\r\n'
'%(no_commit)s'
'X-Timestamp: %(X-Timestamp)s\r\n'
'\r\n'
'\r\n'
'%(chunk_size)s\r\n'
'%(body)s\r\n' % expected)
expected = wsgi_to_bytes(expected)
self.assertEqual(b''.join(connection.sent), expected)
def test_send_put(self):
self._check_send_put('o', 'meta')
def test_send_put_non_durable(self):
self._check_send_put('o', 'meta', durable=False)
def test_send_put_unicode(self):
self._check_send_put(
wsgi_to_str('o_with_caract\xc3\xa8res_like_in_french'),
'm\xc3\xa8ta')
def test_send_put_unicode_header_name(self):
self._check_send_put(
wsgi_to_str('o_with_caract\xc3\xa8res_like_in_french'),
'm\xc3\xa8ta', meta_name='X-Object-Meta-Nam\xc3\xa8')
def _check_send_post(self, obj_name, meta_value):
ts_iter = make_timestamp_iter()
# create .data file
extra_metadata = {'X-Object-Meta-Foo': 'old_value',
'X-Object-Sysmeta-Test': 'test_sysmeta',
'Content-Type': 'test_content_type'}
ts_0 = next(ts_iter)
df = self._make_open_diskfile(obj=obj_name,
extra_metadata=extra_metadata,
timestamp=ts_0)
# create .meta file
ts_1 = next(ts_iter)
newer_metadata = {u'X-Object-Meta-Foo': meta_value,
'X-Timestamp': ts_1.internal}
# Note that diskfile expects obj_name to be a native string
# but metadata to be wsgi strings
df.write_metadata(newer_metadata)
path = six.moves.urllib.parse.quote(df.read_metadata()['name'])
wire_meta = wsgi_to_bytes(meta_value)
length = format(61 + len(path) + len(wire_meta), 'x')
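        # As with send_put above, the 61 constant appears to cover the fixed
        # parts of the POST subrequest: 'POST ' + '\r\n' (7),
        # 'X-Object-Meta-Foo: ' + '\r\n' (21), the X-Timestamp header line
        # (31) and the terminating blank line (2).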
connection = FakeConnection()
with df.open():
self.sender.send_post(connection, path, df)
self.assertEqual(
b''.join(connection.sent),
b'%s\r\n'
b'POST %s\r\n'
b'X-Object-Meta-Foo: %s\r\n'
b'X-Timestamp: %s\r\n'
b'\r\n'
b'\r\n' % (length.encode('ascii'), path.encode('ascii'),
wire_meta,
ts_1.internal.encode('ascii')))
def test_send_post(self):
self._check_send_post('o', 'meta')
def test_send_post_unicode(self):
self._check_send_post(
wsgi_to_str('o_with_caract\xc3\xa8res_like_in_french'),
'm\xc3\xa8ta')
def test_disconnect_timeout(self):
connection = FakeConnection()
connection.send = lambda d: eventlet.sleep(1)
self.sender.daemon.node_timeout = 0.01
self.sender.disconnect(connection)
self.assertEqual(b''.join(connection.sent), b'')
self.assertTrue(connection.closed)
def test_disconnect(self):
connection = FakeConnection()
self.sender.disconnect(connection)
self.assertEqual(b''.join(connection.sent), b'0\r\n\r\n')
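        # b'0\r\n\r\n' is the zero-length chunk that terminates a chunked
        # request body.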
self.assertTrue(connection.closed)
@patch_policies(with_ec_default=True)
class TestSenderEC(BaseTest):
def setUp(self):
skip_if_no_xattrs()
super(TestSenderEC, self).setUp()
self.daemon_logger = debug_logger('test-ssync-sender')
self.daemon = ObjectReplicator(self.daemon_conf,
self.daemon_logger)
job = {'policy': POLICIES.legacy} # sufficient for Sender.__init__
self.sender = ssync_sender.Sender(self.daemon, None, job, None)
def test_missing_check_non_durable(self):
# sender has durable and non-durable data files for frag index 2
ts_iter = make_timestamp_iter()
frag_index = 2
device = 'dev'
part = '9'
object_parts = ('a', 'c', 'o')
object_hash = utils.hash_path(*object_parts)
# older durable data file at t1
t1 = next(ts_iter)
df_durable = self._make_diskfile(
device, part, *object_parts, timestamp=t1, policy=POLICIES.default,
frag_index=frag_index, commit=True, verify=False)
with df_durable.open():
self.assertEqual(t1, df_durable.durable_timestamp) # sanity
# newer non-durable data file at t2
t2 = next(ts_iter)
df_non_durable = self._make_diskfile(
device, part, *object_parts, timestamp=t2, policy=POLICIES.default,
frag_index=frag_index, commit=False, frag_prefs=[])
with df_non_durable.open():
self.assertNotEqual(df_non_durable.data_timestamp,
df_non_durable.durable_timestamp) # sanity
self.sender.job = {
'device': device,
'partition': part,
'policy': POLICIES.default,
'frag_index': frag_index,
}
self.sender.node = {}
# First call missing check with sender in default mode - expect the
# non-durable frag to be ignored
response = FakeResponse(
chunk_body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n')
connection = FakeConnection()
available_map, send_map = self.sender.missing_check(connection,
response)
self.assertEqual(
b''.join(connection.sent),
b'17\r\n:MISSING_CHECK: START\r\n\r\n'
b'33\r\n' + object_hash.encode('utf8') +
b' ' + t1.internal.encode('utf8') + b'\r\n\r\n'
b'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertEqual(
available_map, {object_hash: {'ts_data': t1, 'durable': True}})
# Now make sender send non-durables and repeat missing_check - this
# time the durable is ignored and the non-durable is included in
# available_map (but NOT sent to receiver)
self.sender.include_non_durable = True
response = FakeResponse(
chunk_body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n')
connection = FakeConnection()
available_map, send_map = self.sender.missing_check(connection,
response)
self.assertEqual(
b''.join(connection.sent),
b'17\r\n:MISSING_CHECK: START\r\n\r\n'
b'41\r\n' + object_hash.encode('utf8') +
b' ' + t2.internal.encode('utf8') + b' durable:False\r\n\r\n'
b'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertEqual(
available_map, {object_hash: {'ts_data': t2, 'durable': False}})
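        # The chunk length grows from 0x33 to 0x41 here because the payload
        # gains the 14-byte ' durable:False' marker that the sender adds for
        # non-durable frags (0x33 == 51, 51 + 14 == 65 == 0x41).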
# Finally, purge the non-durable frag and repeat missing-check to
# confirm that the durable frag is now found and sent to receiver
df_non_durable.purge(t2, frag_index)
response = FakeResponse(
chunk_body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n')
connection = FakeConnection()
available_map, send_map = self.sender.missing_check(connection,
response)
self.assertEqual(
b''.join(connection.sent),
b'17\r\n:MISSING_CHECK: START\r\n\r\n'
b'33\r\n' + object_hash.encode('utf8') +
b' ' + t1.internal.encode('utf8') + b'\r\n\r\n'
b'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertEqual(
available_map, {object_hash: {'ts_data': t1, 'durable': True}})
def test_updates_put_non_durable(self):
# sender has durable and non-durable data files for frag index 2 and is
# initialised to include non-durables
ts_iter = make_timestamp_iter()
frag_index = 2
device = 'dev'
part = '9'
object_parts = ('a', 'c', 'o')
object_hash = utils.hash_path(*object_parts)
# older durable data file
t1 = next(ts_iter)
df_durable = self._make_diskfile(
device, part, *object_parts, timestamp=t1, policy=POLICIES.default,
frag_index=frag_index, commit=True, verify=False)
with df_durable.open():
self.assertEqual(t1, df_durable.durable_timestamp) # sanity
# newer non-durable data file
t2 = next(ts_iter)
df_non_durable = self._make_diskfile(
device, part, *object_parts, timestamp=t2, policy=POLICIES.default,
frag_index=frag_index, commit=False, frag_prefs=[])
with df_non_durable.open():
self.assertNotEqual(df_non_durable.data_timestamp,
df_non_durable.durable_timestamp) # sanity
# pretend receiver requested data only
send_map = {object_hash: {'data': True}}
def check_updates(include_non_durable, expected_durable_kwarg):
# call updates and check that the call to send_put is as expected
self.sender.include_non_durable = include_non_durable
self.sender.job = {
'device': device,
'partition': part,
'policy': POLICIES.default,
'frag_index': frag_index,
}
self.sender.node = {}
self.sender.send_delete = mock.MagicMock()
self.sender.send_put = mock.MagicMock()
self.sender.send_post = mock.MagicMock()
response = FakeResponse(
chunk_body=':UPDATES: START\r\n:UPDATES: END\r\n')
connection = FakeConnection()
self.sender.updates(connection, response, send_map)
self.assertEqual(self.sender.send_delete.mock_calls, [])
self.assertEqual(self.sender.send_post.mock_calls, [])
self.assertEqual(1, len(self.sender.send_put.mock_calls))
args, kwargs = self.sender.send_put.call_args
connection, path, df_non_durable = args
self.assertEqual(path, '/a/c/o')
self.assertEqual({'durable': expected_durable_kwarg}, kwargs)
# note that the put line isn't actually sent since we mock
# send_put; send_put is tested separately.
self.assertEqual(
b''.join(connection.sent),
b'11\r\n:UPDATES: START\r\n\r\n'
b'f\r\n:UPDATES: END\r\n\r\n')
# note: we never expect the (False, False) case
check_updates(include_non_durable=False, expected_durable_kwarg=True)
# non-durable frag is newer so is sent
check_updates(include_non_durable=True, expected_durable_kwarg=False)
# remove the newer non-durable frag so that the durable frag is sent...
df_non_durable.purge(t2, frag_index)
check_updates(include_non_durable=True, expected_durable_kwarg=True)
class TestModuleMethods(unittest.TestCase):
def test_encode_missing(self):
object_hash = '9d41d8cd98f00b204e9800998ecf0abc'
ts_iter = make_timestamp_iter()
t_data = next(ts_iter)
t_type = next(ts_iter)
t_meta = next(ts_iter)
d_meta_data = t_meta.raw - t_data.raw
d_type_data = t_type.raw - t_data.raw
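        # These raw deltas are what appear, in hex, after the 'm:' and 't:'
        # markers below. Timestamp.raw is understood here to be the timestamp
        # in 10 microsecond units, consistent with the 0x186a0 (== 100000,
        # i.e. 1 second) meta delta asserted earlier in this file.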
# equal data and meta timestamps -> legacy single timestamp string
expected = '%s %s' % (object_hash, t_data.internal)
self.assertEqual(
expected.encode('ascii'),
ssync_sender.encode_missing(object_hash, t_data, ts_meta=t_data))
# newer meta timestamp -> hex data delta encoded as extra message part
expected = '%s %s m:%x' % (object_hash, t_data.internal, d_meta_data)
self.assertEqual(
expected.encode('ascii'),
ssync_sender.encode_missing(object_hash, t_data, ts_meta=t_meta))
# newer meta timestamp -> hex data delta encoded as extra message part
# content type timestamp equals data timestamp -> no delta
expected = '%s %s m:%x' % (object_hash, t_data.internal, d_meta_data)
self.assertEqual(
expected.encode('ascii'),
ssync_sender.encode_missing(object_hash, t_data, t_meta, t_data))
        # content type timestamp newer than data timestamp -> delta encoded
expected = ('%s %s m:%x,t:%x'
% (object_hash, t_data.internal, d_meta_data, d_type_data))
self.assertEqual(
expected.encode('ascii'),
ssync_sender.encode_missing(object_hash, t_data, t_meta, t_type))
# content type timestamp equal to meta timestamp -> delta encoded
        expected = ('%s %s m:%x,t:%x'
                    % (object_hash, t_data.internal, d_meta_data,
                       d_meta_data))
        self.assertEqual(
            expected.encode('ascii'),
            ssync_sender.encode_missing(object_hash, t_data, t_meta, t_meta))
# optional durable param
expected = ('%s %s m:%x,t:%x'
% (object_hash, t_data.internal, d_meta_data, d_type_data))
self.assertEqual(
expected.encode('ascii'),
ssync_sender.encode_missing(object_hash, t_data, t_meta, t_type,
durable=None))
expected = ('%s %s m:%x,t:%x,durable:False'
% (object_hash, t_data.internal, d_meta_data, d_type_data))
self.assertEqual(
expected.encode('ascii'),
ssync_sender.encode_missing(object_hash, t_data, t_meta, t_type,
durable=False))
expected = ('%s %s m:%x,t:%x'
% (object_hash, t_data.internal, d_meta_data, d_type_data))
self.assertEqual(
expected.encode('ascii'),
ssync_sender.encode_missing(object_hash, t_data, t_meta, t_type,
durable=True))
# timestamps have offsets
t_data_offset = utils.Timestamp(t_data, offset=99)
t_meta_offset = utils.Timestamp(t_meta, offset=1)
t_type_offset = utils.Timestamp(t_type, offset=2)
expected = ('%s %s m:%x__1,t:%x__2'
% (object_hash, t_data_offset.internal, d_meta_data,
d_type_data))
self.assertEqual(
expected.encode('ascii'),
ssync_sender.encode_missing(
object_hash, t_data_offset, t_meta_offset, t_type_offset,
durable=True))
# test encode and decode functions invert
expected = {'object_hash': object_hash, 'ts_meta': t_meta,
'ts_data': t_data, 'ts_ctype': t_type, 'durable': False}
msg = ssync_sender.encode_missing(**expected)
actual = ssync_receiver.decode_missing(msg)
self.assertEqual(expected, actual)
expected = {'object_hash': object_hash, 'ts_meta': t_meta,
'ts_data': t_meta, 'ts_ctype': t_meta, 'durable': True}
msg = ssync_sender.encode_missing(**expected)
actual = ssync_receiver.decode_missing(msg)
self.assertEqual(expected, actual)
# test encode and decode functions invert with offset
t_data_offset = utils.Timestamp(t_data, offset=1)
expected = {'object_hash': object_hash, 'ts_meta': t_meta,
'ts_data': t_data_offset, 'ts_ctype': t_type,
'durable': False}
msg = ssync_sender.encode_missing(**expected)
actual = ssync_receiver.decode_missing(msg)
self.assertEqual(expected, actual)
t_meta_offset = utils.Timestamp(t_data, offset=2)
expected = {'object_hash': object_hash, 'ts_meta': t_meta_offset,
'ts_data': t_data, 'ts_ctype': t_type,
'durable': False}
msg = ssync_sender.encode_missing(**expected)
actual = ssync_receiver.decode_missing(msg)
self.assertEqual(expected, actual)
t_type_offset = utils.Timestamp(t_type, offset=3)
expected = {'object_hash': object_hash, 'ts_meta': t_meta,
'ts_data': t_data, 'ts_ctype': t_type_offset,
'durable': False}
msg = ssync_sender.encode_missing(**expected)
actual = ssync_receiver.decode_missing(msg)
self.assertEqual(expected, actual)
expected = {'object_hash': object_hash, 'ts_meta': t_meta_offset,
'ts_data': t_data_offset, 'ts_ctype': t_type_offset,
'durable': False}
msg = ssync_sender.encode_missing(**expected)
actual = ssync_receiver.decode_missing(msg)
self.assertEqual(expected, actual)
def test_decode_wanted(self):
parts = ['d']
expected = {'data': True}
self.assertEqual(ssync_sender.decode_wanted(parts), expected)
parts = ['m']
expected = {'meta': True}
self.assertEqual(ssync_sender.decode_wanted(parts), expected)
parts = ['dm']
expected = {'data': True, 'meta': True}
self.assertEqual(ssync_sender.decode_wanted(parts), expected)
# you don't really expect these next few...
parts = ['md']
expected = {'data': True, 'meta': True}
self.assertEqual(ssync_sender.decode_wanted(parts), expected)
parts = ['xcy', 'funny', {'business': True}]
expected = {'data': True}
self.assertEqual(ssync_sender.decode_wanted(parts), expected)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/obj/test_ssync_sender.py |
# Copyright (c) 2013 - 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import mock
import os
import unittest
import eventlet
from six.moves import urllib
from swift.common.exceptions import DiskFileNotExist, DiskFileError, \
DiskFileDeleted, DiskFileExpired
from swift.common import swob
from swift.common import utils
from swift.common.storage_policy import POLICIES, EC_POLICY
from swift.obj import ssync_sender, server, diskfile
from swift.obj.reconstructor import RebuildingECDiskFileStream, \
ObjectReconstructor
from swift.obj.replicator import ObjectReplicator
from test import listen_zero
from test.debug_logger import debug_logger
from test.unit.obj.common import BaseTest
from test.unit import patch_policies, encode_frag_archive_bodies, \
skip_if_no_xattrs, quiet_eventlet_exceptions, make_timestamp_iter
class TestBaseSsync(BaseTest):
"""
Provides a framework to test end to end interactions between sender and
receiver. The basis for each test is actual diskfile state on either side.
The connection between sender and receiver is wrapped to capture ssync
traffic for subsequent verification of the protocol. Assertions are made
about the final state of the sender and receiver diskfiles.
"""
def setUp(self):
skip_if_no_xattrs()
super(TestBaseSsync, self).setUp()
# rx side setup
self.rx_testdir = os.path.join(self.tmpdir, 'tmp_test_ssync_receiver')
utils.mkdirs(os.path.join(self.rx_testdir, self.device))
conf = {
'devices': self.rx_testdir,
'mount_check': 'false',
'replication_concurrency_per_device': '0',
'log_requests': 'false'}
self.rx_logger = debug_logger(name='test-ssync-receiver')
self.rx_controller = server.ObjectController(conf, self.rx_logger)
self.ts_iter = make_timestamp_iter()
self.rx_ip = '127.0.0.1'
sock = listen_zero()
self.rx_server = eventlet.spawn(
eventlet.wsgi.server, sock, self.rx_controller, self.rx_logger)
self.rx_port = sock.getsockname()[1]
self.rx_node = {'replication_ip': self.rx_ip,
'replication_port': self.rx_port,
'device': self.device}
self.obj_data = {} # maps obj path -> obj data
def tearDown(self):
self.rx_server.kill()
super(TestBaseSsync, self).tearDown()
def make_connect_wrapper(self, sender):
"""
Make a wrapper function for the ssync_sender.Sender.connect() method
        that will in turn wrap the HTTPConnection.send() and the
Sender.readline() so that ssync protocol messages can be captured.
"""
orig_connect = sender.connect
trace = dict(messages=[])
def add_trace(type, msg):
# record a protocol event for later analysis
if msg.strip():
trace['messages'].append((type, msg.strip()))
def make_send_wrapper(send):
def wrapped_send(msg):
_msg = msg.split(b'\r\n', 1)[1]
_msg = _msg.rsplit(b'\r\n', 1)[0]
add_trace('tx', _msg)
send(msg)
return wrapped_send
def make_readline_wrapper(readline):
def wrapped_readline(size=1024):
data = readline(size=size)
add_trace('rx', data)
bytes_read = trace.setdefault('readline_bytes', 0)
trace['readline_bytes'] = bytes_read + len(data)
return data
return wrapped_readline
def wrapped_connect():
connection, response = orig_connect()
connection.send = make_send_wrapper(
connection.send)
response.readline = make_readline_wrapper(response.readline)
return connection, response
return wrapped_connect, trace
def _get_object_data(self, path, **kwargs):
# return data for given path
if path not in self.obj_data:
self.obj_data[path] = b'%s___data' % path.encode('ascii')
return self.obj_data[path]
def _create_ondisk_files(self, df_mgr, obj_name, policy, timestamp,
frag_indexes=None, commit=True, **kwargs):
frag_indexes = frag_indexes or [None]
metadata = {'Content-Type': 'plain/text'}
diskfiles = []
for frag_index in frag_indexes:
object_data = self._get_object_data('/a/c/%s' % obj_name,
frag_index=frag_index)
if policy.policy_type == EC_POLICY:
metadata['X-Object-Sysmeta-Ec-Frag-Index'] = str(frag_index)
metadata['X-Object-Sysmeta-Ec-Etag'] = 'fake-etag'
df = self._make_diskfile(
device=self.device, partition=self.partition, account='a',
container='c', obj=obj_name, body=object_data,
extra_metadata=metadata, timestamp=timestamp, policy=policy,
frag_index=frag_index, df_mgr=df_mgr, commit=commit, **kwargs)
diskfiles.append(df)
return diskfiles
def _open_tx_diskfile(self, obj_name, policy, frag_index=None, **kwargs):
df_mgr = self.daemon._df_router[policy]
df = df_mgr.get_diskfile(
self.device, self.partition, account='a', container='c',
obj=obj_name, policy=policy, frag_index=frag_index, **kwargs)
df.open()
return df
def _open_rx_diskfile(self, obj_name, policy, frag_index=None, **kwargs):
df = self.rx_controller.get_diskfile(
self.device, self.partition, 'a', 'c', obj_name, policy=policy,
frag_index=frag_index, open_expired=True, **kwargs)
df.open()
return df
    def _verify_diskfile_sync(self, tx_df, rx_df, frag_index,
                              same_etag=False):
# verify that diskfiles' metadata match
# sanity check, they are not the same ondisk files!
self.assertNotEqual(tx_df._datadir, rx_df._datadir)
rx_metadata = dict(rx_df.get_metadata())
for k, v in tx_df.get_metadata().items():
if k == 'X-Object-Sysmeta-Ec-Frag-Index':
# if tx_df had a frag_index then rx_df should also have one
self.assertIn(k, rx_metadata)
self.assertEqual(frag_index, int(rx_metadata.pop(k)))
elif k == 'ETag' and not same_etag:
self.assertNotEqual(v, rx_metadata.pop(k, None))
continue
else:
actual = rx_metadata.pop(k)
self.assertEqual(v, actual, 'Expected %r but got %r for %s' %
(v, actual, k))
self.assertFalse(rx_metadata)
expected_body = self._get_object_data(tx_df._name,
frag_index=frag_index)
actual_body = b''.join([chunk for chunk in rx_df.reader()])
self.assertEqual(expected_body, actual_body)
def _analyze_trace(self, trace):
"""
Parse protocol trace captured by fake connection, making some
assertions along the way, and return results as a dict of form:
results = {'tx_missing': <list of messages>,
'rx_missing': <list of messages>,
'tx_updates': <list of subreqs>,
'rx_updates': <list of messages>}
Each subreq is a dict with keys: 'method', 'path', 'headers', 'body'
"""
def tx_missing(results, line):
self.assertEqual('tx', line[0])
results['tx_missing'].append(line[1])
def rx_missing(results, line):
self.assertEqual('rx', line[0])
parts = line[1].split(b'\r\n')
for part in parts:
results['rx_missing'].append(part)
def tx_updates(results, line):
self.assertEqual('tx', line[0])
subrequests = results['tx_updates']
if line[1].startswith((b'PUT', b'DELETE', b'POST')):
parts = [swob.bytes_to_wsgi(l) for l in line[1].split(b'\r\n')]
method, path = parts[0].split()
subreq = {'method': method, 'path': path, 'req': line[1],
'headers': parts[1:]}
subrequests.append(subreq)
else:
self.assertTrue(subrequests)
body = (subrequests[-1]).setdefault('body', b'')
body += line[1]
subrequests[-1]['body'] = body
def rx_updates(results, line):
self.assertEqual('rx', line[0])
            results['rx_updates'].append(line[1])
def unexpected(results, line):
results.setdefault('unexpected', []).append(line)
# each trace line is a tuple of ([tx|rx], msg)
handshakes = iter([(('tx', b':MISSING_CHECK: START'), tx_missing),
(('tx', b':MISSING_CHECK: END'), unexpected),
(('rx', b':MISSING_CHECK: START'), rx_missing),
(('rx', b':MISSING_CHECK: END'), unexpected),
(('tx', b':UPDATES: START'), tx_updates),
(('tx', b':UPDATES: END'), unexpected),
(('rx', b':UPDATES: START'), rx_updates),
(('rx', b':UPDATES: END'), unexpected)])
expect_handshake = next(handshakes)
phases = ('tx_missing', 'rx_missing', 'tx_updates', 'rx_updates')
results = dict((k, []) for k in phases)
handler = unexpected
lines = list(trace.get('messages', []))
lines.reverse()
while lines:
line = lines.pop()
if line == expect_handshake[0]:
handler = expect_handshake[1]
try:
expect_handshake = next(handshakes)
except StopIteration:
# should be the last line
self.assertFalse(
lines, 'Unexpected trailing lines %s' % lines)
continue
handler(results, line)
try:
# check all handshakes occurred
missed = next(handshakes)
self.fail('Handshake %s not found' % str(missed[0]))
except StopIteration:
pass
# check no message outside of a phase
self.assertFalse(results.get('unexpected'),
                         'Message outside of a phase: %s'
                         % results.get('unexpected'))
return results
def _verify_ondisk_files(self, tx_objs, policy, tx_frag_index=None,
rx_frag_index=None, **kwargs):
"""
Verify tx and rx files that should be in sync.
:param tx_objs: sender diskfiles
:param policy: storage policy instance
:param tx_frag_index: the fragment index of tx diskfiles that should
have been used as a source for sync'ing
:param rx_frag_index: the fragment index of expected rx diskfiles
"""
for o_name, diskfiles in tx_objs.items():
for tx_df in diskfiles:
# check tx file still intact - ssync does not do any cleanup!
tx_df.open()
if tx_frag_index is None or tx_df._frag_index == tx_frag_index:
# this diskfile should have been sync'd,
# check rx file is ok
rx_df = self._open_rx_diskfile(
o_name, policy, rx_frag_index, **kwargs)
# for EC revert job or replication etags should match
match_etag = (tx_frag_index == rx_frag_index)
self._verify_diskfile_sync(
tx_df, rx_df, rx_frag_index, match_etag)
else:
# this diskfile should not have been sync'd,
# check no rx file,
self.assertRaises(DiskFileNotExist, self._open_rx_diskfile,
o_name, policy,
frag_index=tx_df._frag_index)
def _verify_tombstones(self, tx_objs, policy):
# verify tx and rx tombstones that should be in sync
for o_name, diskfiles in tx_objs.items():
try:
self._open_tx_diskfile(o_name, policy)
self.fail('DiskFileDeleted expected')
except DiskFileDeleted as exc:
tx_delete_time = exc.timestamp
try:
self._open_rx_diskfile(o_name, policy)
self.fail('DiskFileDeleted expected')
except DiskFileDeleted as exc:
rx_delete_time = exc.timestamp
self.assertEqual(tx_delete_time, rx_delete_time)
@patch_policies(with_ec_default=True)
class TestBaseSsyncEC(TestBaseSsync):
def setUp(self):
super(TestBaseSsyncEC, self).setUp()
self.policy = POLICIES.default
self.logger = debug_logger('test-ssync-sender')
self.daemon = ObjectReconstructor(self.daemon_conf, self.logger)
self.rx_node['backend_index'] = 0
def _get_object_data(self, path, frag_index=None, **kwargs):
# return a frag archive for given object name and frag index.
# for EC policies obj_data maps obj path -> list of frag archives
if path not in self.obj_data:
# make unique frag archives for each object name
data = path.encode('ascii') * 2 * (
self.policy.ec_ndata + self.policy.ec_nparity)
self.obj_data[path] = encode_frag_archive_bodies(
self.policy, data)
return self.obj_data[path][frag_index]
class TestSsyncEC(TestBaseSsyncEC):
def test_handoff_fragment_revert(self):
# test that a sync_revert type job does send the correct frag archives
# to the receiver
policy = POLICIES.default
rx_node_index = 0
tx_node_index = 1
# for a revert job we iterate over frag index that belongs on
# remote node
frag_index = rx_node_index
# create sender side diskfiles...
tx_objs = {}
rx_objs = {}
tx_tombstones = {}
tx_df_mgr = self.daemon._df_router[policy]
rx_df_mgr = self.rx_controller._diskfile_router[policy]
# o1 has primary and handoff fragment archives
t1 = next(self.ts_iter)
tx_objs['o1'] = self._create_ondisk_files(
tx_df_mgr, 'o1', policy, t1, (rx_node_index, tx_node_index))
# o2 only has primary
t2 = next(self.ts_iter)
tx_objs['o2'] = self._create_ondisk_files(
tx_df_mgr, 'o2', policy, t2, (tx_node_index,))
# o3 only has handoff, rx has other frag index
t3 = next(self.ts_iter)
tx_objs['o3'] = self._create_ondisk_files(
tx_df_mgr, 'o3', policy, t3, (rx_node_index,))
rx_objs['o3'] = self._create_ondisk_files(
rx_df_mgr, 'o3', policy, t3, (13,))
# o4 primary and handoff fragment archives on tx, handoff in sync on rx
t4 = next(self.ts_iter)
tx_objs['o4'] = self._create_ondisk_files(
tx_df_mgr, 'o4', policy, t4, (tx_node_index, rx_node_index,))
rx_objs['o4'] = self._create_ondisk_files(
rx_df_mgr, 'o4', policy, t4, (rx_node_index,))
# o5 is a tombstone, missing on receiver
t5 = next(self.ts_iter)
tx_tombstones['o5'] = self._create_ondisk_files(
tx_df_mgr, 'o5', policy, t5, (tx_node_index,))
tx_tombstones['o5'][0].delete(t5)
suffixes = set()
for diskfiles in list(tx_objs.values()) + list(tx_tombstones.values()):
for df in diskfiles:
suffixes.add(os.path.basename(os.path.dirname(df._datadir)))
# create ssync sender instance...
job = {'device': self.device,
'partition': self.partition,
'policy': policy,
'frag_index': frag_index}
node = dict(self.rx_node)
sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
# wrap connection from tx to rx to capture ssync messages...
sender.connect, trace = self.make_connect_wrapper(sender)
# run the sync protocol...
sender()
# verify protocol
results = self._analyze_trace(trace)
# sender has handoff frags for o1, o3 and o4 and ts for o5
self.assertEqual(4, len(results['tx_missing']))
# receiver is missing frags for o1, o3 and ts for o5
self.assertEqual(3, len(results['rx_missing']))
self.assertEqual(3, len(results['tx_updates']))
self.assertFalse(results['rx_updates'])
sync_paths = []
for subreq in results.get('tx_updates'):
if subreq.get('method') == 'PUT':
self.assertTrue(
'X-Object-Sysmeta-Ec-Frag-Index: %s' % rx_node_index
in subreq.get('headers'))
expected_body = self._get_object_data(subreq['path'],
rx_node_index)
self.assertEqual(expected_body, subreq['body'])
elif subreq.get('method') == 'DELETE':
self.assertEqual('/a/c/o5', subreq['path'])
sync_paths.append(subreq.get('path'))
self.assertEqual(['/a/c/o1', '/a/c/o3', '/a/c/o5'], sorted(sync_paths))
# verify on disk files...
self._verify_ondisk_files(
tx_objs, policy, frag_index, rx_node_index)
self._verify_tombstones(tx_tombstones, policy)
def test_handoff_fragment_only_missing_durable_state(self):
# test that a sync_revert type job does not PUT when the rx is only
# missing durable state
policy = POLICIES.default
rx_node_index = frag_index = 0
tx_node_index = 1
# create sender side diskfiles...
tx_objs = {}
rx_objs = {}
tx_df_mgr = self.daemon._df_router[policy]
rx_df_mgr = self.rx_controller._diskfile_router[policy]
expected_subreqs = defaultdict(list)
# o1 in sync on rx but rx missing durable state - no PUT required
t1a = next(self.ts_iter) # older durable rx .data
t1b = next(self.ts_iter) # rx .meta
t1c = next(self.ts_iter) # durable tx .data, non-durable rx .data
obj_name = 'o1'
tx_objs[obj_name] = self._create_ondisk_files(
tx_df_mgr, obj_name, policy, t1c, (tx_node_index, rx_node_index,))
rx_objs[obj_name] = self._create_ondisk_files(
rx_df_mgr, obj_name, policy, t1a, (rx_node_index,))
metadata = {'X-Timestamp': t1b.internal}
rx_objs[obj_name][0].write_metadata(metadata)
rx_objs[obj_name] = self._create_ondisk_files(
rx_df_mgr, obj_name, policy, t1c, (rx_node_index, 9), commit=False)
# o2 on rx has wrong frag_indexes and is non-durable - PUT required
t2 = next(self.ts_iter)
obj_name = 'o2'
tx_objs[obj_name] = self._create_ondisk_files(
tx_df_mgr, obj_name, policy, t2, (tx_node_index, rx_node_index,))
rx_objs[obj_name] = self._create_ondisk_files(
rx_df_mgr, obj_name, policy, t2, (12, 13), commit=False)
expected_subreqs['PUT'].append(obj_name)
# o3 on rx has frag at newer time and non-durable - PUT required
t3 = next(self.ts_iter)
obj_name = 'o3'
tx_objs[obj_name] = self._create_ondisk_files(
tx_df_mgr, obj_name, policy, t3, (tx_node_index, rx_node_index,))
t3b = next(self.ts_iter)
rx_objs[obj_name] = self._create_ondisk_files(
rx_df_mgr, obj_name, policy, t3b, (rx_node_index,), commit=False)
expected_subreqs['PUT'].append(obj_name)
# o4 on rx has a newer tombstone and even newer frags - no PUT required
t4 = next(self.ts_iter)
obj_name = 'o4'
tx_objs[obj_name] = self._create_ondisk_files(
tx_df_mgr, obj_name, policy, t4, (tx_node_index, rx_node_index,))
rx_objs[obj_name] = self._create_ondisk_files(
rx_df_mgr, obj_name, policy, t4, (rx_node_index,))
t4b = next(self.ts_iter)
rx_objs[obj_name][0].delete(t4b)
t4c = next(self.ts_iter)
rx_objs[obj_name] = self._create_ondisk_files(
rx_df_mgr, obj_name, policy, t4c, (rx_node_index,), commit=False)
suffixes = set()
for diskfiles in tx_objs.values():
for df in diskfiles:
suffixes.add(os.path.basename(os.path.dirname(df._datadir)))
# create ssync sender instance...
job = {'device': self.device,
'partition': self.partition,
'policy': policy,
'frag_index': frag_index}
node = dict(self.rx_node)
sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
# wrap connection from tx to rx to capture ssync messages...
sender.connect, trace = self.make_connect_wrapper(sender)
# run the sync protocol...
sender()
# verify protocol
results = self._analyze_trace(trace)
self.assertEqual(4, len(results['tx_missing']))
self.assertEqual(2, len(results['rx_missing']))
self.assertEqual(2, len(results['tx_updates']))
self.assertFalse(results['rx_updates'])
for subreq in results.get('tx_updates'):
obj = subreq['path'].split('/')[3]
method = subreq['method']
self.assertTrue(obj in expected_subreqs[method],
'Unexpected %s subreq for object %s, expected %s'
% (method, obj, expected_subreqs[method]))
expected_subreqs[method].remove(obj)
if method == 'PUT':
expected_body = self._get_object_data(
subreq['path'], frag_index=rx_node_index)
self.assertEqual(expected_body, subreq['body'])
# verify all expected subreqs consumed
for _method, expected in expected_subreqs.items():
self.assertFalse(expected)
# verify on disk files...
tx_objs.pop('o4') # o4 should not have been sync'd
self._verify_ondisk_files(
tx_objs, policy, frag_index, rx_node_index)
def test_handoff_non_durable_fragment(self):
# test that a sync_revert type job does PUT when the tx is non-durable
policy = POLICIES.default
rx_node_index = frag_index = 0
tx_node_index = 1
# create sender side diskfiles...
tx_objs = {}
rx_objs = {}
tx_df_mgr = self.daemon._df_router[policy]
rx_df_mgr = self.rx_controller._diskfile_router[policy]
expected_subreqs = defaultdict(list)
# o1 non-durable on tx and missing on rx
t1 = next(self.ts_iter) # newer non-durable tx .data
obj_name = 'o1'
tx_objs[obj_name] = self._create_ondisk_files(
tx_df_mgr, obj_name, policy, t1, (tx_node_index, rx_node_index,),
commit=False, frag_prefs=[])
expected_subreqs['PUT'].append(obj_name)
# o2 non-durable on tx and rx
t2 = next(self.ts_iter)
obj_name = 'o2'
tx_objs[obj_name] = self._create_ondisk_files(
tx_df_mgr, obj_name, policy, t2, (tx_node_index, rx_node_index,),
commit=False, frag_prefs=[])
rx_objs[obj_name] = self._create_ondisk_files(
rx_df_mgr, obj_name, policy, t2, (rx_node_index,), commit=False,
frag_prefs=[])
# o3 durable on tx and missing on rx, to check the include_non_durable
# does not exclude durables
t3 = next(self.ts_iter)
obj_name = 'o3'
tx_objs[obj_name] = self._create_ondisk_files(
tx_df_mgr, obj_name, policy, t3, (tx_node_index, rx_node_index,))
expected_subreqs['PUT'].append(obj_name)
suffixes = set()
for diskfiles in tx_objs.values():
for df in diskfiles:
suffixes.add(os.path.basename(os.path.dirname(df._datadir)))
# create ssync sender instance...with include_non_durable
job = {'device': self.device,
'partition': self.partition,
'policy': policy,
'frag_index': frag_index}
node = dict(self.rx_node)
sender = ssync_sender.Sender(self.daemon, node, job, suffixes,
include_non_durable=True)
# wrap connection from tx to rx to capture ssync messages...
sender.connect, trace = self.make_connect_wrapper(sender)
# run the sync protocol...
sender()
# verify protocol
results = self._analyze_trace(trace)
self.assertEqual(3, len(results['tx_missing']))
self.assertEqual(2, len(results['rx_missing']))
self.assertEqual(2, len(results['tx_updates']))
self.assertFalse(results['rx_updates'])
for subreq in results.get('tx_updates'):
obj = subreq['path'].split('/')[3]
method = subreq['method']
self.assertTrue(obj in expected_subreqs[method],
'Unexpected %s subreq for object %s, expected %s'
% (method, obj, expected_subreqs[method]))
expected_subreqs[method].remove(obj)
if method == 'PUT':
expected_body = self._get_object_data(
subreq['path'], frag_index=rx_node_index)
self.assertEqual(expected_body, subreq['body'])
# verify all expected subreqs consumed
for _method, expected in expected_subreqs.items():
self.assertFalse(expected)
# verify on disk files...
# tx_objs.pop('o4') # o4 should not have been sync'd
self._verify_ondisk_files(
tx_objs, policy, frag_index, rx_node_index, frag_prefs=[])
def test_fragment_sync(self):
# check that a sync_only type job does call reconstructor to build a
# diskfile to send, and continues making progress despite an error
# when building one diskfile
policy = POLICIES.default
rx_node_index = 0
tx_node_index = 1
# for a sync job we iterate over frag index that belongs on local node
frag_index = tx_node_index
# create sender side diskfiles...
tx_objs = {}
tx_tombstones = {}
rx_objs = {}
tx_df_mgr = self.daemon._df_router[policy]
rx_df_mgr = self.rx_controller._diskfile_router[policy]
# o1 only has primary
t1 = next(self.ts_iter)
tx_objs['o1'] = self._create_ondisk_files(
tx_df_mgr, 'o1', policy, t1, (tx_node_index,))
# o2 only has primary
t2 = next(self.ts_iter)
tx_objs['o2'] = self._create_ondisk_files(
tx_df_mgr, 'o2', policy, t2, (tx_node_index,))
# o3 only has primary
t3 = next(self.ts_iter)
tx_objs['o3'] = self._create_ondisk_files(
tx_df_mgr, 'o3', policy, t3, (tx_node_index,))
# o4 primary fragment archives on tx, handoff in sync on rx
t4 = next(self.ts_iter)
tx_objs['o4'] = self._create_ondisk_files(
tx_df_mgr, 'o4', policy, t4, (tx_node_index,))
rx_objs['o4'] = self._create_ondisk_files(
rx_df_mgr, 'o4', policy, t4, (rx_node_index,))
# o5 is a tombstone, missing on receiver
t5 = next(self.ts_iter)
tx_tombstones['o5'] = self._create_ondisk_files(
tx_df_mgr, 'o5', policy, t5, (tx_node_index,))
tx_tombstones['o5'][0].delete(t5)
suffixes = set()
for diskfiles in list(tx_objs.values()) + list(tx_tombstones.values()):
for df in diskfiles:
suffixes.add(os.path.basename(os.path.dirname(df._datadir)))
reconstruct_fa_calls = []
def fake_reconstruct_fa(job, node, df):
reconstruct_fa_calls.append((job, node, policy, df))
if len(reconstruct_fa_calls) == 2:
# simulate second reconstruct failing
raise DiskFileError
metadata = df.get_datafile_metadata()
content = self._get_object_data(metadata['name'],
frag_index=rx_node_index)
return RebuildingECDiskFileStream(
metadata, rx_node_index, iter([content]))
# create ssync sender instance...
job = {'device': self.device,
'partition': self.partition,
'policy': policy,
'frag_index': frag_index,
'sync_diskfile_builder': fake_reconstruct_fa}
node = dict(self.rx_node)
sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
# wrap connection from tx to rx to capture ssync messages...
sender.connect, trace = self.make_connect_wrapper(sender)
# run the sync protocol...
sender()
# verify protocol
results = self._analyze_trace(trace)
# sender has primary for o1, o2 and o3, o4 and ts for o5
self.assertEqual(5, len(results['tx_missing']))
# receiver is missing o1, o2 and o3 and ts for o5
self.assertEqual(4, len(results['rx_missing']))
# sender can only construct 2 out of 3 missing frags
self.assertEqual(3, len(results['tx_updates']))
self.assertEqual(3, len(reconstruct_fa_calls))
self.assertFalse(results['rx_updates'])
actual_sync_paths = []
for subreq in results.get('tx_updates'):
if subreq.get('method') == 'PUT':
self.assertTrue(
'X-Object-Sysmeta-Ec-Frag-Index: %s' % rx_node_index
in subreq.get('headers'))
expected_body = self._get_object_data(
subreq['path'], frag_index=rx_node_index)
self.assertEqual(expected_body, subreq['body'])
elif subreq.get('method') == 'DELETE':
self.assertEqual('/a/c/o5', subreq['path'])
actual_sync_paths.append(subreq.get('path'))
# remove the failed df from expected synced df's
expect_sync_paths = ['/a/c/o1', '/a/c/o2', '/a/c/o3', '/a/c/o5']
failed_df = reconstruct_fa_calls[1][3]
failed_path = failed_df.get_datafile_metadata()['name']
expect_sync_paths.remove(failed_path)
failed_obj = None
for obj, diskfiles in tx_objs.items():
if diskfiles[0]._name == failed_path:
failed_obj = obj
# sanity check
self.assertTrue(tx_objs.pop(failed_obj))
# verify on disk files...
self.assertEqual(sorted(expect_sync_paths), sorted(actual_sync_paths))
self._verify_ondisk_files(
tx_objs, policy, frag_index, rx_node_index)
self._verify_tombstones(tx_tombstones, policy)
def test_send_with_frag_index_none(self):
policy = POLICIES.default
tx_df_mgr = self.daemon._df_router[policy]
rx_df_mgr = self.rx_controller._diskfile_router[policy]
# create an ec fragment on the remote node
ts1 = next(self.ts_iter)
remote_df = self._create_ondisk_files(
rx_df_mgr, 'o', policy, ts1, (3,))[0]
# create a tombstone on the local node
df = self._create_ondisk_files(
tx_df_mgr, 'o', policy, ts1, (3,))[0]
suffix = os.path.basename(os.path.dirname(df._datadir))
ts2 = next(self.ts_iter)
df.delete(ts2)
# a reconstructor revert job with only tombstones will have frag_index
# explicitly set to None
job = {
'frag_index': None,
'partition': self.partition,
'policy': policy,
'device': self.device,
}
sender = ssync_sender.Sender(
self.daemon, self.rx_node, job, [suffix])
success, _ = sender()
self.assertTrue(success)
try:
remote_df.read_metadata()
except DiskFileDeleted as e:
self.assertEqual(e.timestamp, ts2)
else:
self.fail('Successfully opened remote DiskFile')
def test_send_invalid_frag_index(self):
policy = POLICIES.default
job = {'frag_index': 'No one cares',
'device': self.device,
'partition': self.partition,
'policy': policy}
self.rx_node['backend_index'] = 'Not a number'
sender = ssync_sender.Sender(
self.daemon, self.rx_node, job, ['abc'])
success, _ = sender()
self.assertFalse(success)
error_log_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_log_lines))
error_msg = error_log_lines[0]
self.assertIn("Expected status 200; got 400", error_msg)
self.assertIn("Invalid X-Backend-Ssync-Frag-Index 'Not a number'",
error_msg)
def test_revert_job_with_legacy_durable(self):
# test a sync_revert type job using a sender object with a legacy
# durable file, that will create a receiver object with durable data
policy = POLICIES.default
rx_node_index = 0
# for a revert job we iterate over frag index that belongs on
# remote node
frag_index = rx_node_index
# create non durable tx obj by not committing, then create a legacy
# .durable file
tx_objs = {}
tx_df_mgr = self.daemon._df_router[policy]
rx_df_mgr = self.rx_controller._diskfile_router[policy]
t1 = next(self.ts_iter)
tx_objs['o1'] = self._create_ondisk_files(
tx_df_mgr, 'o1', policy, t1, (rx_node_index,), commit=False)
tx_datadir = tx_objs['o1'][0]._datadir
durable_file = os.path.join(tx_datadir, t1.internal + '.durable')
with open(durable_file, 'wb'):
pass
self.assertEqual(2, len(os.listdir(tx_datadir))) # sanity check
suffixes = [os.path.basename(os.path.dirname(tx_datadir))]
# create ssync sender instance...
job = {'device': self.device,
'partition': self.partition,
'policy': policy,
'frag_index': frag_index}
node = dict(self.rx_node)
sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
# wrap connection from tx to rx to capture ssync messages...
sender.connect, trace = self.make_connect_wrapper(sender)
# run the sync protocol...
sender()
# verify protocol
results = self._analyze_trace(trace)
self.assertEqual(1, len(results['tx_missing']))
self.assertEqual(1, len(results['rx_missing']))
self.assertEqual(1, len(results['tx_updates']))
self.assertFalse(results['rx_updates'])
# sanity check - rx diskfile is durable
expected_rx_file = '%s#%s#d.data' % (t1.internal, rx_node_index)
rx_df = self._open_rx_diskfile('o1', policy, rx_node_index)
self.assertEqual([expected_rx_file], os.listdir(rx_df._datadir))
# verify on disk files...
self._verify_ondisk_files(
tx_objs, policy, frag_index, rx_node_index)
# verify that tx and rx both generate the same suffix hashes...
tx_hashes = tx_df_mgr.get_hashes(
self.device, self.partition, suffixes, policy)
rx_hashes = rx_df_mgr.get_hashes(
self.device, self.partition, suffixes, policy)
self.assertEqual(suffixes, list(tx_hashes.keys())) # sanity
self.assertEqual(tx_hashes, rx_hashes)
# sanity check - run ssync again and expect no sync activity
sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
sender.connect, trace = self.make_connect_wrapper(sender)
sender()
results = self._analyze_trace(trace)
self.assertEqual(1, len(results['tx_missing']))
self.assertFalse(results['rx_missing'])
self.assertFalse(results['tx_updates'])
self.assertFalse(results['rx_updates'])
class FakeResponse(object):
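    """
    Stand-in for a reconstructor GET response for a single fragment archive.
    init() binds the response to an object path (selecting that path's data
    for this frag_index, or an Exception to raise later), getheaders()
    returns the minimal EC sysmeta headers, and read() hands back the
    fragment data once, truncated to ``length`` if one was given.
    """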
def __init__(self, frag_index, obj_data, length=None, status=200):
self.frag_index = frag_index
self.obj_data = obj_data
self.data = b''
self.length = length
self.status = status
def init(self, path, conf):
if isinstance(self.obj_data, Exception):
self.data = self.obj_data
else:
self.data = self.obj_data[path][self.frag_index]
self.conf = conf
def getheaders(self):
return {
'X-Object-Sysmeta-Ec-Frag-Index': str(self.frag_index),
'X-Object-Sysmeta-Ec-Etag': 'the etag',
'X-Backend-Timestamp': self.conf['timestamp'].internal
}
def read(self, length):
if isinstance(self.data, Exception):
raise self.data
val = self.data
self.data = b''
return val if self.length is None else val[:self.length]
class TestSsyncECReconstructorSyncJob(TestBaseSsyncEC):
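    """
    Tests for sync-type ssync jobs where the sender has to rebuild each
    fragment via the reconstructor; FakeResponse objects stand in for the
    GET responses the reconstructor would normally fetch from other nodes.
    """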
def setUp(self):
super(TestSsyncECReconstructorSyncJob, self).setUp()
self.rx_node_index = 0
self.tx_node_index = 1
# create sender side diskfiles...ensure their timestamps are in the
# past so that tests that set reclaim_age=0 succeed in reclaiming
self.ts_iter = make_timestamp_iter(offset=-1000)
self.tx_objs = {}
tx_df_mgr = self.daemon._df_router[self.policy]
t1 = next(self.ts_iter)
self.tx_objs['o1'] = self._create_ondisk_files(
tx_df_mgr, 'o1', self.policy, t1, (self.tx_node_index,))
t2 = next(self.ts_iter)
self.tx_objs['o2'] = self._create_ondisk_files(
tx_df_mgr, 'o2', self.policy, t2, (self.tx_node_index,))
self.response_confs = {'/a/c/o1': {'timestamp': t1},
'/a/c/o2': {'timestamp': t2}}
self.suffixes = set()
for diskfiles in list(self.tx_objs.values()):
for df in diskfiles:
self.suffixes.add(
os.path.basename(os.path.dirname(df._datadir)))
self.job_node = dict(self.rx_node)
self.job_node['id'] = 0
self.frag_length = int(
self.tx_objs['o1'][0].get_metadata()['Content-Length'])
def _test_reconstructor_sync_job(self, frag_responses, custom_conf=None):
# Helper method to mock reconstructor to consume given lists of fake
# responses while reconstructing a fragment for a sync type job. The
# tests verify that when the reconstructed fragment iter fails in some
# way then ssync does not mistakenly create fragments on the receiving
# node which have incorrect data.
# See https://bugs.launchpad.net/swift/+bug/1631144
custom_conf = custom_conf if custom_conf else {}
# frag_responses is a list of two lists of responses to each
# reconstructor GET request for a fragment archive. The two items in
# the outer list are lists of responses for each of the two fragments
# to be reconstructed, and are used in the order that ssync syncs the
# fragments. Items in the inner lists are responses for each of the
# other fragments fetched during the reconstructor rebuild.
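        # For illustration only (values are hypothetical), the shape mirrors
        # what the tests below build:
        #   frag_responses = [
        #       [FakeResponse(0, obj_data), FakeResponse(1, obj_data), ...],
        #       [FakeResponse(0, obj_data), FakeResponse(1, obj_data), ...]]
        # with ec_ndata + ec_nparity entries in each inner list.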
path_to_responses = {}
fake_get_response_calls = []
def fake_get_response(recon, node, policy, part, path, headers):
# select a list of fake responses for this path and return the next
# from the list: we don't know the order in which paths will show
# up but we do want frag_responses[0] to be used first, so the
# frag_responses aren't bound to a path until this point
if path not in path_to_responses:
path_to_responses[path] = frag_responses.pop(0)
response = path_to_responses[path].pop()
# the frag_responses list is in ssync task order: we only know the
# path when consuming the responses so initialise the path in the
# response now
if response:
response.init(path, self.response_confs[path])
# should be full path but just used for logging...
response.full_path = path
fake_get_response_calls.append(path)
return response
def fake_get_part_nodes(part):
# the reconstructor will try to remove the receiver node from the
# object ring part nodes, but the fake node we created for our
# receiver is not actually in the ring part nodes, so append it
# here simply so that the reconstructor does not fail to remove it.
return (self.policy.object_ring._get_part_nodes(part) +
[self.job_node])
with mock.patch(
'swift.obj.reconstructor.ObjectReconstructor._get_response',
fake_get_response), \
mock.patch.object(
self.policy.object_ring, 'get_part_nodes',
fake_get_part_nodes):
conf = self.daemon_conf
conf.update(custom_conf)
self.reconstructor = ObjectReconstructor(conf, logger=self.logger)
job = {
'device': self.device,
'partition': self.partition,
'policy': self.policy,
'frag_index': self.tx_node_index,
'sync_diskfile_builder':
self.reconstructor.reconstruct_fa
}
sender = ssync_sender.Sender(
self.reconstructor, self.job_node, job, self.suffixes)
sender.connect, trace = self.make_connect_wrapper(sender)
sender()
return trace
def test_sync_reconstructor_partial_rebuild(self):
# First fragment to sync gets partial content from reconstructor.
# Expect ssync job to exit early with no file written on receiver.
frag_responses = [
[FakeResponse(i, self.obj_data, length=-1)
for i in range(self.policy.ec_ndata + self.policy.ec_nparity)],
[FakeResponse(i, self.obj_data)
for i in range(self.policy.ec_ndata + self.policy.ec_nparity)]]
self._test_reconstructor_sync_job(frag_responses)
msgs = []
for obj_name in ('o1', 'o2'):
try:
df = self._open_rx_diskfile(
obj_name, self.policy, self.rx_node_index)
msgs.append('Unexpected rx diskfile for %r with content %r' %
(obj_name, b''.join([d for d in df.reader()])))
except DiskFileNotExist:
pass # expected outcome
if msgs:
self.fail('Failed with:\n%s' % '\n'.join(msgs))
log_lines = self.logger.get_lines_for_level('error')
self.assertIn('Sent data length does not match content-length',
log_lines[0])
self.assertFalse(log_lines[1:])
# trampoline for the receiver to write a log
eventlet.sleep(0)
log_lines = self.rx_logger.get_lines_for_level('warning')
self.assertEqual(1, len(log_lines), self.rx_logger.all_log_lines())
self.assertIn('ssync subrequest failed with 499',
log_lines[0])
self.assertFalse(log_lines[1:])
self.assertFalse(self.rx_logger.get_lines_for_level('error'))
def test_sync_reconstructor_no_rebuilt_content(self):
# First fragment to sync gets no content in any response to
# reconstructor. Expect ssync job to exit early with no file written on
# receiver.
frag_responses = [
[FakeResponse(i, self.obj_data, length=0)
for i in range(self.policy.ec_ndata + self.policy.ec_nparity)],
[FakeResponse(i, self.obj_data)
for i in range(self.policy.ec_ndata + self.policy.ec_nparity)]]
self._test_reconstructor_sync_job(frag_responses)
msgs = []
for obj_name in ('o1', 'o2'):
try:
df = self._open_rx_diskfile(
obj_name, self.policy, self.rx_node_index)
msgs.append('Unexpected rx diskfile for %r with content %r' %
(obj_name, b''.join([d for d in df.reader()])))
except DiskFileNotExist:
pass # expected outcome
if msgs:
self.fail('Failed with:\n%s' % '\n'.join(msgs))
log_lines = self.logger.get_lines_for_level('error')
self.assertIn('Sent data length does not match content-length',
log_lines[0])
self.assertFalse(log_lines[1:])
# trampoline for the receiver to write a log
eventlet.sleep(0)
log_lines = self.rx_logger.get_lines_for_level('warning')
self.assertIn('ssync subrequest failed with 499',
log_lines[0])
self.assertFalse(log_lines[1:])
self.assertFalse(self.rx_logger.get_lines_for_level('error'))
def test_sync_reconstructor_exception_during_rebuild(self):
# First fragment to sync has some reconstructor get responses raise
# exception while rebuilding. Expect ssync job to exit early with no
# files written on receiver.
frag_responses = [
# ec_ndata responses are ok, but one of these will be ignored as
# it is for the frag index being rebuilt
[FakeResponse(i, self.obj_data)
for i in range(self.policy.ec_ndata)] +
# ec_nparity responses will raise an Exception - at least one of
# these will be used during rebuild
[FakeResponse(i, Exception('raised in response read method'))
for i in range(self.policy.ec_ndata,
self.policy.ec_ndata + self.policy.ec_nparity)],
# second set of response are all good
[FakeResponse(i, self.obj_data)
for i in range(self.policy.ec_ndata + self.policy.ec_nparity)]]
with quiet_eventlet_exceptions():
self._test_reconstructor_sync_job(frag_responses)
msgs = []
for obj_name in ('o1', 'o2'):
try:
df = self._open_rx_diskfile(
obj_name, self.policy, self.rx_node_index)
msgs.append('Unexpected rx diskfile for %r with content %r' %
(obj_name, b''.join([d for d in df.reader()])))
except DiskFileNotExist:
pass # expected outcome
if msgs:
self.fail('Failed with:\n%s' % '\n'.join(msgs))
log_lines = self.logger.get_lines_for_level('error')
self.assertIn('Error trying to rebuild', log_lines[0])
self.assertIn('Sent data length does not match content-length',
log_lines[1])
self.assertFalse(log_lines[2:])
# trampoline for the receiver to write a log
eventlet.sleep(0)
log_lines = self.rx_logger.get_lines_for_level('warning')
self.assertIn('ssync subrequest failed with 499',
log_lines[0])
self.assertFalse(log_lines[1:])
self.assertFalse(self.rx_logger.get_lines_for_level('error'))
def test_sync_reconstructor_no_responses(self):
# First fragment to sync gets no responses for reconstructor to rebuild
# with, nothing is sent to receiver so expect to skip that fragment and
# continue with second.
frag_responses = [
[None
for i in range(self.policy.ec_ndata + self.policy.ec_nparity)],
[FakeResponse(i, self.obj_data)
for i in range(self.policy.ec_ndata + self.policy.ec_nparity)]]
trace = self._test_reconstructor_sync_job(frag_responses)
results = self._analyze_trace(trace)
self.assertEqual(2, len(results['tx_missing']))
self.assertEqual(2, len(results['rx_missing']))
self.assertEqual(1, len(results['tx_updates']))
self.assertFalse(results['rx_updates'])
self.assertEqual('PUT', results['tx_updates'][0].get('method'))
synced_obj_path = results['tx_updates'][0].get('path')
synced_obj_name = synced_obj_path[-2:]
msgs = []
obj_name = synced_obj_name
try:
df = self._open_rx_diskfile(
obj_name, self.policy, self.rx_node_index)
self.assertEqual(
self._get_object_data(synced_obj_path,
frag_index=self.rx_node_index),
b''.join([d for d in df.reader()]))
except DiskFileNotExist:
msgs.append('Missing rx diskfile for %r' % obj_name)
obj_names = list(self.tx_objs)
obj_names.remove(synced_obj_name)
obj_name = obj_names[0]
try:
df = self._open_rx_diskfile(
obj_name, self.policy, self.rx_node_index)
msgs.append('Unexpected rx diskfile for %r with content %r' %
(obj_name, b''.join([d for d in df.reader()])))
except DiskFileNotExist:
pass # expected outcome
if msgs:
self.fail('Failed with:\n%s' % '\n'.join(msgs))
log_lines = self.logger.get_lines_for_level('error')
self.assertIn('Unable to get enough responses', log_lines[0])
# trampoline for the receiver to write a log
eventlet.sleep(0)
self.assertFalse(self.rx_logger.get_lines_for_level('warning'))
self.assertFalse(self.rx_logger.get_lines_for_level('error'))
def test_sync_reconstructor_quarantines_lonely_frag(self):
# First fragment to sync gets only one response for reconstructor to
# rebuild with, and that response is for the tx_node frag index: it
# should be quarantined, but after that the ssync session should still
        # proceed with rebuilding the second frag.
lonely_frag_responses = [
FakeResponse(i, self.obj_data, status=404)
for i in range(self.policy.ec_ndata + self.policy.ec_nparity)]
lonely_frag_responses[self.tx_node_index].status = 200
frag_responses = [
lonely_frag_responses,
[FakeResponse(i, self.obj_data)
for i in range(self.policy.ec_ndata + self.policy.ec_nparity)]]
# configure reconstructor to quarantine the lonely frag
custom_conf = {'reclaim_age': 0, 'quarantine_threshold': 1}
trace = self._test_reconstructor_sync_job(frag_responses, custom_conf)
results = self._analyze_trace(trace)
self.assertEqual(2, len(results['tx_missing']))
self.assertEqual(2, len(results['rx_missing']))
self.assertEqual(1, len(results['tx_updates']))
self.assertFalse(results['rx_updates'])
self.assertEqual('PUT', results['tx_updates'][0].get('method'))
synced_obj_path = results['tx_updates'][0].get('path')
synced_obj_name = synced_obj_path[-2:]
# verify that the second frag was rebuilt on rx node...
msgs = []
try:
df = self._open_rx_diskfile(
synced_obj_name, self.policy, self.rx_node_index)
self.assertEqual(
self._get_object_data(synced_obj_path,
frag_index=self.rx_node_index),
b''.join([d for d in df.reader()]))
except DiskFileNotExist:
msgs.append('Missing rx diskfile for %r' % synced_obj_name)
# ...and it is still on tx node...
try:
df = self._open_tx_diskfile(
synced_obj_name, self.policy, self.tx_node_index)
self.assertEqual(
self._get_object_data(df._name,
frag_index=self.tx_node_index),
b''.join([d for d in df.reader()]))
except DiskFileNotExist:
msgs.append('Missing tx diskfile for %r' % synced_obj_name)
# verify that the lonely frag was not rebuilt on rx node and was
# removed on tx node
obj_names = list(self.tx_objs)
obj_names.remove(synced_obj_name)
quarantined_obj_name = obj_names[0]
try:
df = self._open_rx_diskfile(
quarantined_obj_name, self.policy, self.rx_node_index)
msgs.append(
'Unexpected rx diskfile for %r with content %r' %
(quarantined_obj_name, b''.join([d for d in df.reader()])))
except DiskFileNotExist:
pass # expected outcome
try:
df = self._open_tx_diskfile(
quarantined_obj_name, self.policy, self.tx_node_index)
msgs.append(
'Unexpected tx diskfile for %r with content %r' %
(quarantined_obj_name, b''.join([d for d in df.reader()])))
except DiskFileNotExist:
pass # expected outcome
if msgs:
self.fail('Failed with:\n%s' % '\n'.join(msgs))
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(2, len(error_lines), error_lines)
self.assertIn('Unable to get enough responses', error_lines[0])
self.assertIn('Unable to get enough responses', error_lines[1])
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_lines), warning_lines)
self.assertIn('Quarantined object', warning_lines[0])
# check we have a quarantined data file
df_mgr = self.daemon._df_router[self.policy]
quarantined_df = df_mgr.get_diskfile(
self.device, self.partition, account='a', container='c',
obj=quarantined_obj_name, policy=self.policy,
frag_index=self.tx_node_index)
df_hash = os.path.basename(quarantined_df._datadir)
quarantine_dir = os.path.join(
quarantined_df._device_path, 'quarantined',
diskfile.get_data_dir(self.policy), df_hash)
self.assertTrue(os.path.isdir(quarantine_dir))
data_file = os.listdir(quarantine_dir)[0]
with open(os.path.join(quarantine_dir, data_file), 'rb') as fd:
self.assertEqual(
self._get_object_data(quarantined_df._name,
frag_index=self.tx_node_index),
fd.read())
# trampoline for the receiver to write a log
eventlet.sleep(0)
self.assertFalse(self.rx_logger.get_lines_for_level('warning'))
self.assertFalse(self.rx_logger.get_lines_for_level('error'))
def test_sync_reconstructor_rebuild_ok(self):
# Sanity test for this class of tests. Both fragments get a full
# complement of responses and rebuild correctly.
frag_responses = [
[FakeResponse(i, self.obj_data)
for i in range(self.policy.ec_ndata + self.policy.ec_nparity)],
[FakeResponse(i, self.obj_data)
for i in range(self.policy.ec_ndata + self.policy.ec_nparity)]]
trace = self._test_reconstructor_sync_job(frag_responses)
results = self._analyze_trace(trace)
self.assertEqual(2, len(results['tx_missing']))
self.assertEqual(2, len(results['rx_missing']))
self.assertEqual(2, len(results['tx_updates']))
self.assertFalse(results['rx_updates'])
msgs = []
for obj_name in self.tx_objs:
try:
df = self._open_rx_diskfile(
obj_name, self.policy, self.rx_node_index)
self.assertEqual(
self._get_object_data(df._name,
frag_index=self.rx_node_index),
b''.join([d for d in df.reader()]))
except DiskFileNotExist:
msgs.append('Missing rx diskfile for %r' % obj_name)
if msgs:
self.fail('Failed with:\n%s' % '\n'.join(msgs))
        self.assertFalse(self.logger.get_lines_for_level('error'))
# trampoline for the receiver to write a log
eventlet.sleep(0)
self.assertFalse(self.rx_logger.get_lines_for_level('warning'))
self.assertFalse(self.rx_logger.get_lines_for_level('error'))
@patch_policies
class TestSsyncReplication(TestBaseSsync):
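    """
    End-to-end ssync tests for a replication policy, covering .data files,
    tombstones, .meta files, content-type updates and expiring objects
    exchanged between an ObjectReplicator sender and the test receiver.
    """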
def setUp(self):
super(TestSsyncReplication, self).setUp()
self.logger = debug_logger('test-ssync-sender')
self.daemon = ObjectReplicator(self.daemon_conf, self.logger)
def test_sync(self):
policy = POLICIES.default
# create sender side diskfiles...
tx_objs = {}
rx_objs = {}
tx_tombstones = {}
rx_tombstones = {}
tx_df_mgr = self.daemon._df_router[policy]
rx_df_mgr = self.rx_controller._diskfile_router[policy]
# o1 and o2 are on tx only
t1 = next(self.ts_iter)
tx_objs['o1'] = self._create_ondisk_files(tx_df_mgr, 'o1', policy, t1)
t2 = next(self.ts_iter)
tx_objs['o2'] = self._create_ondisk_files(tx_df_mgr, 'o2', policy, t2)
# o3 is on tx and older copy on rx
t3a = next(self.ts_iter)
rx_objs['o3'] = self._create_ondisk_files(rx_df_mgr, 'o3', policy, t3a)
t3b = next(self.ts_iter)
tx_objs['o3'] = self._create_ondisk_files(tx_df_mgr, 'o3', policy, t3b)
# o4 in sync on rx and tx
t4 = next(self.ts_iter)
tx_objs['o4'] = self._create_ondisk_files(tx_df_mgr, 'o4', policy, t4)
rx_objs['o4'] = self._create_ondisk_files(rx_df_mgr, 'o4', policy, t4)
# o5 is a tombstone, missing on receiver
t5 = next(self.ts_iter)
tx_tombstones['o5'] = self._create_ondisk_files(
tx_df_mgr, 'o5', policy, t5)
tx_tombstones['o5'][0].delete(t5)
# o6 is a tombstone, in sync on tx and rx
t6 = next(self.ts_iter)
tx_tombstones['o6'] = self._create_ondisk_files(
tx_df_mgr, 'o6', policy, t6)
tx_tombstones['o6'][0].delete(t6)
rx_tombstones['o6'] = self._create_ondisk_files(
rx_df_mgr, 'o6', policy, t6)
rx_tombstones['o6'][0].delete(t6)
# o7 is a tombstone on tx, older data on rx
t7a = next(self.ts_iter)
rx_objs['o7'] = self._create_ondisk_files(rx_df_mgr, 'o7', policy, t7a)
t7b = next(self.ts_iter)
tx_tombstones['o7'] = self._create_ondisk_files(
tx_df_mgr, 'o7', policy, t7b)
tx_tombstones['o7'][0].delete(t7b)
suffixes = set()
for diskfiles in list(tx_objs.values()) + list(tx_tombstones.values()):
for df in diskfiles:
suffixes.add(os.path.basename(os.path.dirname(df._datadir)))
# create ssync sender instance...
job = {'device': self.device,
'partition': self.partition,
'policy': policy}
node = dict(self.rx_node)
sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
# wrap connection from tx to rx to capture ssync messages...
sender.connect, trace = self.make_connect_wrapper(sender)
# run the sync protocol...
success, in_sync_objs = sender()
self.assertEqual(7, len(in_sync_objs))
self.assertTrue(success)
# verify protocol
results = self._analyze_trace(trace)
self.assertEqual(7, len(results['tx_missing']))
self.assertEqual(5, len(results['rx_missing']))
self.assertEqual(5, len(results['tx_updates']))
self.assertFalse(results['rx_updates'])
sync_paths = []
for subreq in results.get('tx_updates'):
if subreq.get('method') == 'PUT':
self.assertTrue(
subreq['path'] in ('/a/c/o1', '/a/c/o2', '/a/c/o3'))
expected_body = self._get_object_data(subreq['path'])
self.assertEqual(expected_body, subreq['body'])
elif subreq.get('method') == 'DELETE':
self.assertTrue(subreq['path'] in ('/a/c/o5', '/a/c/o7'))
sync_paths.append(subreq.get('path'))
self.assertEqual(
['/a/c/o1', '/a/c/o2', '/a/c/o3', '/a/c/o5', '/a/c/o7'],
sorted(sync_paths))
# verify on disk files...
self._verify_ondisk_files(tx_objs, policy)
self._verify_tombstones(tx_tombstones, policy)
def test_nothing_to_sync(self):
job = {'device': self.device,
'partition': self.partition,
'policy': POLICIES.default}
node = {'replication_ip': self.rx_ip,
'replication_port': self.rx_port,
'device': self.device,
'index': 0}
sender = ssync_sender.Sender(self.daemon, node, job, ['abc'])
# wrap connection from tx to rx to capture ssync messages...
sender.connect, trace = self.make_connect_wrapper(sender)
result, in_sync_objs = sender()
self.assertTrue(result)
self.assertFalse(in_sync_objs)
results = self._analyze_trace(trace)
self.assertFalse(results['tx_missing'])
self.assertFalse(results['rx_missing'])
self.assertFalse(results['tx_updates'])
self.assertFalse(results['rx_updates'])
# Minimal receiver response as read by sender:
# 2 <-- initial \r\n to start ssync exchange
        # + 23 <-- :MISSING_CHECK: START\r\n
        # + 2 <-- \r\n (minimal missing check response)
        # + 21 <-- :MISSING_CHECK: END\r\n
        # + 17 <-- :UPDATES: START\r\n
        # + 15 <-- :UPDATES: END\r\n
# TOTAL = 80
self.assertEqual(80, trace.get('readline_bytes'))
def test_meta_file_sync(self):
policy = POLICIES.default
# create diskfiles...
tx_objs = {}
rx_objs = {}
tx_tombstones = {}
rx_tombstones = {}
tx_df_mgr = self.daemon._df_router[policy]
rx_df_mgr = self.rx_controller._diskfile_router[policy]
expected_subreqs = defaultdict(list)
# o1 on tx only with meta file
t1 = next(self.ts_iter)
tx_objs['o1'] = self._create_ondisk_files(tx_df_mgr, 'o1', policy, t1)
t1_meta = next(self.ts_iter)
metadata = {'X-Timestamp': t1_meta.internal,
'X-Object-Meta-Test': 'o1',
'X-Object-Sysmeta-Test': 'sys_o1'}
tx_objs['o1'][0].write_metadata(metadata)
expected_subreqs['PUT'].append('o1')
expected_subreqs['POST'].append('o1')
# o2 on tx with meta, on rx without meta
t2 = next(self.ts_iter)
tx_objs['o2'] = self._create_ondisk_files(tx_df_mgr, 'o2', policy, t2)
t2_meta = next(self.ts_iter)
metadata = {'X-Timestamp': t2_meta.internal,
'X-Object-Meta-Test': 'o2',
'X-Object-Sysmeta-Test': 'sys_o2'}
tx_objs['o2'][0].write_metadata(metadata)
rx_objs['o2'] = self._create_ondisk_files(rx_df_mgr, 'o2', policy, t2)
expected_subreqs['POST'].append('o2')
# o3 is on tx with meta, rx has newer data but no meta,
# meta timestamp has an offset
t3a = next(self.ts_iter)
tx_objs['o3'] = self._create_ondisk_files(tx_df_mgr, 'o3', policy, t3a)
t3b = next(self.ts_iter)
rx_objs['o3'] = self._create_ondisk_files(rx_df_mgr, 'o3', policy, t3b)
t3_meta = next(self.ts_iter)
t3_meta = utils.Timestamp(t3_meta, offset=2)
metadata = {'X-Timestamp': t3_meta.internal,
'X-Object-Meta-Test': 'o3',
'X-Object-Sysmeta-Test': 'sys_o3'}
tx_objs['o3'][0].write_metadata(metadata)
expected_subreqs['POST'].append('o3')
# o4 is on tx with meta, rx has older data and up to date meta,
t4a = next(self.ts_iter)
rx_objs['o4'] = self._create_ondisk_files(rx_df_mgr, 'o4', policy, t4a)
t4b = next(self.ts_iter)
tx_objs['o4'] = self._create_ondisk_files(tx_df_mgr, 'o4', policy, t4b)
t4_meta = next(self.ts_iter)
metadata = {'X-Timestamp': t4_meta.internal,
'X-Object-Meta-Test': 'o4',
'X-Object-Sysmeta-Test': 'sys_o4'}
tx_objs['o4'][0].write_metadata(metadata)
rx_objs['o4'][0].write_metadata(metadata)
expected_subreqs['PUT'].append('o4')
# o5 is on tx with meta, rx is in sync with data and meta
t5 = next(self.ts_iter)
t5 = utils.Timestamp(t5, offset=1) # note: use an offset for this test
rx_objs['o5'] = self._create_ondisk_files(rx_df_mgr, 'o5', policy, t5)
tx_objs['o5'] = self._create_ondisk_files(tx_df_mgr, 'o5', policy, t5)
t5_meta = next(self.ts_iter)
metadata = {'X-Timestamp': t5_meta.internal,
'X-Object-Meta-Test': 'o5',
'X-Object-Sysmeta-Test': 'sys_o5'}
tx_objs['o5'][0].write_metadata(metadata)
rx_objs['o5'][0].write_metadata(metadata)
# o6 is tombstone on tx, rx has older data and meta
t6 = next(self.ts_iter)
tx_tombstones['o6'] = self._create_ondisk_files(
tx_df_mgr, 'o6', policy, t6)
rx_tombstones['o6'] = self._create_ondisk_files(
rx_df_mgr, 'o6', policy, t6)
metadata = {'X-Timestamp': next(self.ts_iter).internal,
'X-Object-Meta-Test': 'o6',
'X-Object-Sysmeta-Test': 'sys_o6'}
rx_tombstones['o6'][0].write_metadata(metadata)
tx_tombstones['o6'][0].delete(next(self.ts_iter))
expected_subreqs['DELETE'].append('o6')
# o7 is tombstone on rx, tx has older data and meta,
# no subreqs expected...
t7 = next(self.ts_iter)
tx_objs['o7'] = self._create_ondisk_files(tx_df_mgr, 'o7', policy, t7)
rx_tombstones['o7'] = self._create_ondisk_files(
rx_df_mgr, 'o7', policy, t7)
metadata = {'X-Timestamp': next(self.ts_iter).internal,
'X-Object-Meta-Test': 'o7',
'X-Object-Sysmeta-Test': 'sys_o7'}
tx_objs['o7'][0].write_metadata(metadata)
rx_tombstones['o7'][0].delete(next(self.ts_iter))
# o8 is on tx with meta, rx has in sync data but meta with different
# offset
t8 = next(self.ts_iter)
rx_objs['o8'] = self._create_ondisk_files(rx_df_mgr, 'o8', policy, t8)
tx_objs['o8'] = self._create_ondisk_files(tx_df_mgr, 'o8', policy, t8)
t8_meta = next(self.ts_iter)
t8_meta_offset = utils.Timestamp(t8_meta, offset=4)
metadata = {'X-Timestamp': t8_meta_offset.internal,
'X-Object-Meta-Test': 'o8',
'X-Object-Sysmeta-Test': 'sys_o8'}
tx_objs['o8'][0].write_metadata(metadata)
# different ts_meta offset on rx
t8_meta_offset = utils.Timestamp(t8_meta, offset=3)
metadata = {'X-Timestamp': t8_meta_offset.internal,
'X-Object-Meta-Test': 'o8',
'X-Object-Sysmeta-Test': 'sys_o8'}
rx_objs['o8'][0].write_metadata(metadata)
expected_subreqs['POST'].append('o8')
suffixes = set()
for diskfiles in list(tx_objs.values()) + list(tx_tombstones.values()):
for df in diskfiles:
suffixes.add(os.path.basename(os.path.dirname(df._datadir)))
# create ssync sender instance...
job = {'device': self.device,
'partition': self.partition,
'policy': policy}
node = dict(self.rx_node)
sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
# wrap connection from tx to rx to capture ssync messages...
sender.connect, trace = self.make_connect_wrapper(sender)
# run the sync protocol...
success, in_sync_objs = sender()
self.assertEqual(8, len(in_sync_objs))
self.assertTrue(success)
# verify protocol
results = self._analyze_trace(trace)
self.assertEqual(8, len(results['tx_missing']))
self.assertEqual(6, len(results['rx_missing']))
for subreq in results.get('tx_updates'):
obj = subreq['path'].split('/')[3]
method = subreq['method']
self.assertTrue(obj in expected_subreqs[method],
'Unexpected %s subreq for object %s, expected %s'
% (method, obj, expected_subreqs[method]))
expected_subreqs[method].remove(obj)
if method == 'PUT':
expected_body = self._get_object_data(subreq['path'])
self.assertEqual(expected_body, subreq['body'])
# verify all expected subreqs consumed
for _method, expected in expected_subreqs.items():
self.assertFalse(expected)
self.assertFalse(results['rx_updates'])
# verify on disk files...
del tx_objs['o7'] # o7 not expected to be sync'd
self._verify_ondisk_files(tx_objs, policy)
self._verify_tombstones(tx_tombstones, policy)
for oname, rx_obj in rx_objs.items():
df = rx_obj[0].open()
metadata = df.get_metadata()
self.assertEqual(metadata['X-Object-Meta-Test'], oname)
self.assertEqual(metadata['X-Object-Sysmeta-Test'], 'sys_' + oname)
def test_expired_object(self):
# verify that expired objects sync
policy = POLICIES.default
tx_df_mgr = self.daemon._df_router[policy]
t1 = next(self.ts_iter)
obj_name = 'o1'
metadata = {'X-Delete-At': '0', 'Content-Type': 'plain/text'}
df = self._make_diskfile(
obj=obj_name, body=self._get_object_data('/a/c/%s' % obj_name),
extra_metadata=metadata, timestamp=t1, policy=policy,
df_mgr=tx_df_mgr, verify=False)
with self.assertRaises(DiskFileExpired):
df.open() # sanity check - expired
# create ssync sender instance...
suffixes = [os.path.basename(os.path.dirname(df._datadir))]
job = {'device': self.device,
'partition': self.partition,
'policy': policy}
node = dict(self.rx_node)
sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
# wrap connection from tx to rx to capture ssync messages...
sender.connect, trace = self.make_connect_wrapper(sender)
# run the sync protocol...
success, in_sync_objs = sender()
self.assertEqual(1, len(in_sync_objs))
self.assertTrue(success)
# allow the expired sender diskfile to be opened for verification
df._open_expired = True
self._verify_ondisk_files({obj_name: [df]}, policy)
def _check_no_longer_expired_object(self, obj_name, df, policy):
# verify that objects with x-delete-at metadata that are not expired
# can be sync'd
def do_ssync():
# create ssync sender instance...
suffixes = [os.path.basename(os.path.dirname(df._datadir))]
job = {'device': self.device,
'partition': self.partition,
'policy': policy}
node = dict(self.rx_node)
sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
# wrap connection from tx to rx to capture ssync messages...
sender.connect, trace = self.make_connect_wrapper(sender)
# run the sync protocol...
return sender()
with self.assertRaises(DiskFileExpired):
df.open() # sanity check - expired
t1_meta = next(self.ts_iter)
df.write_metadata({'X-Timestamp': t1_meta.internal}) # no x-delete-at
df.open() # sanity check - no longer expired
success, in_sync_objs = do_ssync()
self.assertEqual(1, len(in_sync_objs))
self.assertTrue(success)
self._verify_ondisk_files({obj_name: [df]}, policy)
# update object metadata with x-delete-at in distant future
t2_meta = next(self.ts_iter)
df.write_metadata({'X-Timestamp': t2_meta.internal,
'X-Delete-At': str(int(t2_meta) + 10000)})
df.open() # sanity check - not expired
success, in_sync_objs = do_ssync()
self.assertEqual(1, len(in_sync_objs))
self.assertTrue(success)
self._verify_ondisk_files({obj_name: [df]}, policy)
        # update object metadata with x-delete-at in the not-so-distant future
        # to check that we can update rx with an older x-delete-at than its
        # current one
t3_meta = next(self.ts_iter)
df.write_metadata({'X-Timestamp': t3_meta.internal,
'X-Delete-At': str(int(t2_meta) + 5000)})
df.open() # sanity check - not expired
success, in_sync_objs = do_ssync()
self.assertEqual(1, len(in_sync_objs))
self.assertTrue(success)
self._verify_ondisk_files({obj_name: [df]}, policy)
def test_no_longer_expired_object_syncs(self):
policy = POLICIES.default
# simulate o1 that was PUT with x-delete-at that is now expired but
# later had a POST that had no x-delete-at: object should not expire.
tx_df_mgr = self.daemon._df_router[policy]
t1 = next(self.ts_iter)
obj_name = 'o1'
metadata = {'X-Delete-At': '0', 'Content-Type': 'plain/text'}
df = self._make_diskfile(
obj=obj_name, body=self._get_object_data('/a/c/%s' % obj_name),
extra_metadata=metadata, timestamp=t1, policy=policy,
df_mgr=tx_df_mgr, verify=False)
self._check_no_longer_expired_object(obj_name, df, policy)
def test_no_longer_expired_object_syncs_meta(self):
policy = POLICIES.default
# simulate o1 that was PUT with x-delete-at that is now expired but
# later had a POST that had no x-delete-at: object should not expire.
tx_df_mgr = self.daemon._df_router[policy]
rx_df_mgr = self.rx_controller._diskfile_router[policy]
t1 = next(self.ts_iter)
obj_name = 'o1'
metadata = {'X-Delete-At': '0', 'Content-Type': 'plain/text'}
df = self._make_diskfile(
obj=obj_name, body=self._get_object_data('/a/c/%s' % obj_name),
extra_metadata=metadata, timestamp=t1, policy=policy,
df_mgr=tx_df_mgr, verify=False)
# rx got the .data file but is missing the .meta
rx_df = self._make_diskfile(
obj=obj_name, body=self._get_object_data('/a/c/%s' % obj_name),
extra_metadata=metadata, timestamp=t1, policy=policy,
df_mgr=rx_df_mgr, verify=False)
with self.assertRaises(DiskFileExpired):
rx_df.open() # sanity check - expired
self._check_no_longer_expired_object(obj_name, df, policy)
def test_meta_file_not_synced_to_legacy_receiver(self):
# verify that the sender does sync a data file to a legacy receiver,
# but does not PUT meta file content to a legacy receiver
policy = POLICIES.default
# create diskfiles...
tx_df_mgr = self.daemon._df_router[policy]
rx_df_mgr = self.rx_controller._diskfile_router[policy]
# rx has data at t1 but no meta
# object is on tx with data at t2, meta at t3,
t1 = next(self.ts_iter)
self._create_ondisk_files(rx_df_mgr, 'o1', policy, t1)
t2 = next(self.ts_iter)
tx_obj = self._create_ondisk_files(tx_df_mgr, 'o1', policy, t2)[0]
t3 = next(self.ts_iter)
metadata = {'X-Timestamp': t3.internal,
'X-Object-Meta-Test': 'o3',
'X-Object-Sysmeta-Test': 'sys_o3'}
tx_obj.write_metadata(metadata)
suffixes = [os.path.basename(os.path.dirname(tx_obj._datadir))]
# create ssync sender instance...
job = {'device': self.device,
'partition': self.partition,
'policy': policy}
node = dict(self.rx_node)
sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
# wrap connection from tx to rx to capture ssync messages...
sender.connect, trace = self.make_connect_wrapper(sender)
def _legacy_check_missing(self, line):
# reproduces behavior of 'legacy' ssync receiver missing_checks()
parts = line.decode('ascii').split()
object_hash = urllib.parse.unquote(parts[0])
timestamp = urllib.parse.unquote(parts[1])
want = False
try:
df = self.diskfile_mgr.get_diskfile_from_hash(
self.device, self.partition, object_hash, self.policy,
frag_index=self.frag_index)
except DiskFileNotExist:
want = True
else:
try:
df.open()
except DiskFileDeleted as err:
want = err.timestamp < timestamp
except DiskFileError:
want = True
else:
want = df.timestamp < timestamp
if want:
return urllib.parse.quote(object_hash)
return None
# run the sync protocol...
func = 'swift.obj.ssync_receiver.Receiver._check_missing'
with mock.patch(func, _legacy_check_missing):
success, in_sync_objs = sender()
self.assertEqual(1, len(in_sync_objs))
self.assertTrue(success)
# verify protocol, expecting only a PUT to legacy receiver
results = self._analyze_trace(trace)
self.assertEqual(1, len(results['tx_missing']))
self.assertEqual(1, len(results['rx_missing']))
self.assertEqual(1, len(results['tx_updates']))
self.assertEqual('PUT', results['tx_updates'][0]['method'])
self.assertFalse(results['rx_updates'])
# verify on disk files...
rx_obj = self._open_rx_diskfile('o1', policy)
tx_obj = self._open_tx_diskfile('o1', policy)
# with legacy behavior rx_obj data and meta timestamps are equal
self.assertEqual(t2, rx_obj.data_timestamp)
self.assertEqual(t2, rx_obj.timestamp)
# with legacy behavior rx_obj data timestamp should equal tx_obj
self.assertEqual(rx_obj.data_timestamp, tx_obj.data_timestamp)
# tx meta file should not have been sync'd to rx data file
self.assertNotIn('X-Object-Meta-Test', rx_obj.get_metadata())
def test_content_type_sync(self):
policy = POLICIES.default
# create diskfiles...
tx_objs = {}
rx_objs = {}
tx_df_mgr = self.daemon._df_router[policy]
rx_df_mgr = self.rx_controller._diskfile_router[policy]
expected_subreqs = defaultdict(list)
# o1 on tx only with two meta files
name = 'o1'
t1 = next(self.ts_iter)
tx_objs[name] = self._create_ondisk_files(tx_df_mgr, name, policy, t1)
t1_type = next(self.ts_iter)
metadata_1 = {'X-Timestamp': t1_type.internal,
'Content-Type': 'text/test',
'Content-Type-Timestamp': t1_type.internal}
tx_objs[name][0].write_metadata(metadata_1)
t1_meta = next(self.ts_iter)
metadata_2 = {'X-Timestamp': t1_meta.internal,
'X-Object-Meta-Test': name}
tx_objs[name][0].write_metadata(metadata_2)
expected_subreqs['PUT'].append(name)
expected_subreqs['POST'].append(name)
# o2 on tx with two meta files, rx has .data and newest .meta but is
# missing latest content-type
name = 'o2'
t2 = next(self.ts_iter)
tx_objs[name] = self._create_ondisk_files(tx_df_mgr, name, policy, t2)
t2_type = next(self.ts_iter)
metadata_1 = {'X-Timestamp': t2_type.internal,
'Content-Type': 'text/test',
'Content-Type-Timestamp': t2_type.internal}
tx_objs[name][0].write_metadata(metadata_1)
t2_meta = next(self.ts_iter)
metadata_2 = {'X-Timestamp': t2_meta.internal,
'X-Object-Meta-Test': name}
tx_objs[name][0].write_metadata(metadata_2)
rx_objs[name] = self._create_ondisk_files(rx_df_mgr, name, policy, t2)
rx_objs[name][0].write_metadata(metadata_2)
expected_subreqs['POST'].append(name)
# o3 on tx with two meta files, rx has .data and one .meta but does
# have latest content-type so nothing to sync
name = 'o3'
t3 = next(self.ts_iter)
tx_objs[name] = self._create_ondisk_files(tx_df_mgr, name, policy, t3)
t3_type = next(self.ts_iter)
metadata_1 = {'X-Timestamp': t3_type.internal,
'Content-Type': 'text/test',
'Content-Type-Timestamp': t3_type.internal}
tx_objs[name][0].write_metadata(metadata_1)
t3_meta = next(self.ts_iter)
metadata_2 = {'X-Timestamp': t3_meta.internal,
'X-Object-Meta-Test': name}
tx_objs[name][0].write_metadata(metadata_2)
rx_objs[name] = self._create_ondisk_files(rx_df_mgr, name, policy, t3)
metadata_2b = {'X-Timestamp': t3_meta.internal,
'X-Object-Meta-Test': name,
'Content-Type': 'text/test',
'Content-Type-Timestamp': t3_type.internal}
rx_objs[name][0].write_metadata(metadata_2b)
# o4 on tx with one meta file having latest content-type, rx has
# .data and two .meta having latest content-type so nothing to sync
# i.e. o4 is the reverse of o3 scenario
name = 'o4'
t4 = next(self.ts_iter)
tx_objs[name] = self._create_ondisk_files(tx_df_mgr, name, policy, t4)
t4_type = next(self.ts_iter)
t4_meta = next(self.ts_iter)
metadata_2b = {'X-Timestamp': t4_meta.internal,
'X-Object-Meta-Test': name,
'Content-Type': 'text/test',
'Content-Type-Timestamp': t4_type.internal}
tx_objs[name][0].write_metadata(metadata_2b)
rx_objs[name] = self._create_ondisk_files(rx_df_mgr, name, policy, t4)
metadata_1 = {'X-Timestamp': t4_type.internal,
'Content-Type': 'text/test',
'Content-Type-Timestamp': t4_type.internal}
rx_objs[name][0].write_metadata(metadata_1)
metadata_2 = {'X-Timestamp': t4_meta.internal,
'X-Object-Meta-Test': name}
rx_objs[name][0].write_metadata(metadata_2)
# o5 on tx with one meta file having latest content-type, rx has
# .data and no .meta
name = 'o5'
t5 = next(self.ts_iter)
tx_objs[name] = self._create_ondisk_files(tx_df_mgr, name, policy, t5)
t5_type = next(self.ts_iter)
t5_meta = next(self.ts_iter)
metadata = {'X-Timestamp': t5_meta.internal,
'X-Object-Meta-Test': name,
'Content-Type': 'text/test',
'Content-Type-Timestamp': t5_type.internal}
tx_objs[name][0].write_metadata(metadata)
rx_objs[name] = self._create_ondisk_files(rx_df_mgr, name, policy, t5)
expected_subreqs['POST'].append(name)
suffixes = set()
for diskfiles in tx_objs.values():
for df in diskfiles:
suffixes.add(os.path.basename(os.path.dirname(df._datadir)))
# create ssync sender instance...
job = {'device': self.device,
'partition': self.partition,
'policy': policy}
node = dict(self.rx_node)
sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
# wrap connection from tx to rx to capture ssync messages...
sender.connect, trace = self.make_connect_wrapper(sender)
# run the sync protocol...
success, in_sync_objs = sender()
self.assertEqual(5, len(in_sync_objs), trace['messages'])
self.assertTrue(success)
# verify protocol
results = self._analyze_trace(trace)
self.assertEqual(5, len(results['tx_missing']))
self.assertEqual(3, len(results['rx_missing']))
for subreq in results.get('tx_updates'):
obj = subreq['path'].split('/')[3]
method = subreq['method']
self.assertTrue(obj in expected_subreqs[method],
'Unexpected %s subreq for object %s, expected %s'
% (method, obj, expected_subreqs[method]))
expected_subreqs[method].remove(obj)
if method == 'PUT':
expected_body = self._get_object_data(subreq['path'])
self.assertEqual(expected_body, subreq['body'])
# verify all expected subreqs consumed
for _method, expected in expected_subreqs.items():
self.assertFalse(expected,
'Expected subreqs not seen for %s for objects %s'
% (_method, expected))
self.assertFalse(results['rx_updates'])
# verify on disk files...
self._verify_ondisk_files(tx_objs, policy)
for oname, rx_obj in rx_objs.items():
df = rx_obj[0].open()
metadata = df.get_metadata()
self.assertEqual(metadata['X-Object-Meta-Test'], oname)
self.assertEqual(metadata['Content-Type'], 'text/test')
# verify that tx and rx both generate the same suffix hashes...
tx_hashes = tx_df_mgr.get_hashes(
self.device, self.partition, suffixes, policy)
rx_hashes = rx_df_mgr.get_hashes(
self.device, self.partition, suffixes, policy)
self.assertEqual(tx_hashes, rx_hashes)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/obj/test_ssync.py |
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import time
from unittest import main, TestCase
from test.debug_logger import debug_logger
from test.unit import FakeRing, mocked_http_conn, make_timestamp_iter
from tempfile import mkdtemp
from shutil import rmtree
from collections import defaultdict
from copy import deepcopy
import mock
import six
from six.moves import urllib
from swift.common import internal_client, utils, swob
from swift.common.utils import Timestamp
from swift.obj import expirer
def not_random():
return 0.5
last_not_sleep = 0
def not_sleep(seconds):
global last_not_sleep
last_not_sleep = seconds
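# not_random and not_sleep are deterministic module-level stand-ins for these
# tests: not_random returns a fixed value where a random one would otherwise
# be used, and setUp() below swaps internal_client.sleep for not_sleep so the
# requested interval is recorded in last_not_sleep instead of sleeping.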
class FakeInternalClient(object):
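    """
    Minimal stand-in for the expirer's internal client: it serves a canned
    account/container/object listing from aco_dict and silently accepts
    delete_container and delete_object calls.
    """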
container_ring = FakeRing()
def __init__(self, aco_dict):
"""
        :param aco_dict: A dict of account, container, object that
            FakeInternalClient can return when each method is called. Each
            account has a container name dict, and each container dict has a
            list of objects in the container.
e.g. {'account1': {
                 'container1': ['obj1', 'obj2', {'name': 'obj3'}],
                 'container2': [],
},
'account2': {},
}
N.B. the objects entries should be the container-server JSON style
db rows, but this fake will dynamically detect when names are given
and wrap them for convenience.
"""
self.aco_dict = defaultdict(dict)
self.aco_dict.update(aco_dict)
def get_account_info(self, account):
acc_dict = self.aco_dict[account]
container_count = len(acc_dict)
obj_count = sum(len(objs) for objs in acc_dict.values())
return container_count, obj_count
def iter_containers(self, account, prefix=''):
acc_dict = self.aco_dict[account]
return [{'name': six.text_type(container)}
for container in sorted(acc_dict)
if container.startswith(prefix)]
def delete_container(*a, **kw):
pass
def iter_objects(self, account, container):
acc_dict = self.aco_dict[account]
obj_iter = acc_dict.get(container, [])
resp = []
for obj in obj_iter:
if not isinstance(obj, dict):
obj = {'name': six.text_type(obj)}
resp.append(obj)
return resp
def delete_object(*a, **kw):
pass
class TestObjectExpirer(TestCase):
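    """
    Tests for the object expirer, driven by a FakeInternalClient whose task
    queue account ('.expiring_objects') is pre-populated in setUp() with task
    containers named by timestamp, each holding task objects of the form
    '<delete-at timestamp>-<account>/<container>/<object>'.
    """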
maxDiff = None
internal_client = None
def setUp(self):
global not_sleep
self.old_sleep = internal_client.sleep
internal_client.sleep = not_sleep
self.rcache = mkdtemp()
self.conf = {'recon_cache_path': self.rcache}
self.logger = debug_logger('test-expirer')
self.ts = make_timestamp_iter()
self.empty_time = str(int(time() - 864000))
self.past_time = str(int(time() - 86400))
self.just_past_time = str(int(time() - 1))
self.future_time = str(int(time() + 86400))
# Dummy task queue for test
self.fake_swift = FakeInternalClient({
'.expiring_objects': {
# this task container will be checked
self.empty_time: [],
self.past_time: [
# tasks ready for execution
self.past_time + '-a0/c0/o0',
self.past_time + '-a1/c1/o1',
self.past_time + '-a2/c2/o2',
self.past_time + '-a3/c3/o3',
self.past_time + '-a4/c4/o4'],
self.just_past_time: [
self.just_past_time + '-a5/c5/o5',
self.just_past_time + '-a6/c6/o6',
self.just_past_time + '-a7/c7/o7',
# task objects for unicode test
self.just_past_time + u'-a8/c8/o8\u2661',
self.just_past_time + u'-a9/c9/o9\xf8',
# this task will be skipped and prevent us from even
# *trying* to delete the container
self.future_time + '-a10/c10/o10'],
# this task container will be skipped
self.future_time: [
self.future_time + '-a11/c11/o11']}
})
self.expirer = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=self.fake_swift)
        # map of times to target object paths which should be expired now
self.expired_target_paths = {
self.past_time: [
swob.wsgi_to_str(tgt) for tgt in (
'a0/c0/o0', 'a1/c1/o1', 'a2/c2/o2', 'a3/c3/o3', 'a4/c4/o4',
)
],
self.just_past_time: [
swob.wsgi_to_str(tgt) for tgt in (
'a5/c5/o5', 'a6/c6/o6', 'a7/c7/o7',
'a8/c8/o8\xe2\x99\xa1', 'a9/c9/o9\xc3\xb8',
)
],
}
def make_fake_ic(self, app):
app._pipeline_final_app = mock.MagicMock()
return internal_client.InternalClient(None, 'fake-ic', 1, app=app)
def tearDown(self):
rmtree(self.rcache)
internal_client.sleep = self.old_sleep
def test_init(self):
with mock.patch.object(expirer, 'InternalClient',
return_value=self.fake_swift) as mock_ic:
x = expirer.ObjectExpirer({}, logger=self.logger)
self.assertEqual(mock_ic.mock_calls, [mock.call(
'/etc/swift/object-expirer.conf', 'Swift Object Expirer', 3,
use_replication_network=True,
global_conf={'log_name': 'object-expirer-ic'})])
self.assertEqual(self.logger.get_lines_for_level('warning'), [])
self.assertEqual(x.expiring_objects_account, '.expiring_objects')
self.assertIs(x.swift, self.fake_swift)
x = expirer.ObjectExpirer({'auto_create_account_prefix': '-'},
logger=self.logger, swift=self.fake_swift)
self.assertEqual(self.logger.get_lines_for_level('warning'), [
'Option auto_create_account_prefix is deprecated. '
'Configure auto_create_account_prefix under the '
'swift-constraints section of swift.conf. This option '
'will be ignored in a future release.'
])
self.assertEqual(x.expiring_objects_account, '-expiring_objects')
def test_init_internal_client_log_name(self):
def _do_test_init_ic_log_name(conf, exp_internal_client_log_name):
with mock.patch(
'swift.obj.expirer.InternalClient') \
as mock_ic:
expirer.ObjectExpirer(conf)
mock_ic.assert_called_once_with(
'/etc/swift/object-expirer.conf',
'Swift Object Expirer', 3,
global_conf={'log_name': exp_internal_client_log_name},
use_replication_network=True)
_do_test_init_ic_log_name({}, 'object-expirer-ic')
_do_test_init_ic_log_name({'log_name': 'my-object-expirer'},
'my-object-expirer-ic')
def test_get_process_values_from_kwargs(self):
x = expirer.ObjectExpirer({}, swift=self.fake_swift)
vals = {
'processes': 5,
'process': 1,
}
x.get_process_values(vals)
self.assertEqual(x.processes, 5)
self.assertEqual(x.process, 1)
def test_get_process_values_from_config(self):
vals = {
'processes': 5,
'process': 1,
}
x = expirer.ObjectExpirer(vals, swift=self.fake_swift)
x.get_process_values({})
self.assertEqual(x.processes, 5)
self.assertEqual(x.process, 1)
def test_get_process_values_negative_process(self):
vals = {
'processes': 5,
'process': -1,
}
# from config
x = expirer.ObjectExpirer(vals, swift=self.fake_swift)
expected_msg = 'process must be an integer greater' \
' than or equal to 0'
with self.assertRaises(ValueError) as ctx:
x.get_process_values({})
self.assertEqual(str(ctx.exception), expected_msg)
# from kwargs
x = expirer.ObjectExpirer({}, swift=self.fake_swift)
with self.assertRaises(ValueError) as ctx:
x.get_process_values(vals)
self.assertEqual(str(ctx.exception), expected_msg)
def test_get_process_values_negative_processes(self):
vals = {
'processes': -5,
'process': 1,
}
# from config
x = expirer.ObjectExpirer(vals, swift=self.fake_swift)
expected_msg = 'processes must be an integer greater' \
' than or equal to 0'
with self.assertRaises(ValueError) as ctx:
x.get_process_values({})
self.assertEqual(str(ctx.exception), expected_msg)
# from kwargs
x = expirer.ObjectExpirer({}, swift=self.fake_swift)
with self.assertRaises(ValueError) as ctx:
x.get_process_values(vals)
self.assertEqual(str(ctx.exception), expected_msg)
def test_get_process_values_process_greater_than_processes(self):
vals = {
'processes': 5,
'process': 7,
}
# from config
x = expirer.ObjectExpirer(vals, swift=self.fake_swift)
expected_msg = 'process must be less than processes'
with self.assertRaises(ValueError) as ctx:
x.get_process_values({})
self.assertEqual(str(ctx.exception), expected_msg)
# from kwargs
x = expirer.ObjectExpirer({}, swift=self.fake_swift)
with self.assertRaises(ValueError) as ctx:
x.get_process_values(vals)
self.assertEqual(str(ctx.exception), expected_msg)
def test_get_process_values_process_equal_to_processes(self):
vals = {
'processes': 5,
'process': 5,
}
# from config
x = expirer.ObjectExpirer(vals, swift=self.fake_swift)
expected_msg = 'process must be less than processes'
with self.assertRaises(ValueError) as ctx:
x.get_process_values({})
self.assertEqual(str(ctx.exception), expected_msg)
# from kwargs
x = expirer.ObjectExpirer({}, swift=self.fake_swift)
with self.assertRaises(ValueError) as ctx:
x.get_process_values(vals)
self.assertEqual(str(ctx.exception), expected_msg)
def test_init_concurrency_too_small(self):
conf = {
'concurrency': 0,
}
with self.assertRaises(ValueError):
expirer.ObjectExpirer(conf, swift=self.fake_swift)
conf = {
'concurrency': -1,
}
with self.assertRaises(ValueError):
expirer.ObjectExpirer(conf, swift=self.fake_swift)
def test_process_based_concurrency(self):
class ObjectExpirer(expirer.ObjectExpirer):
def __init__(self, conf, swift):
super(ObjectExpirer, self).__init__(conf, swift=swift)
self.processes = 3
self.deleted_objects = {}
def delete_object(self, target_path, delete_timestamp,
task_account, task_container, task_object,
is_async_delete):
if task_container not in self.deleted_objects:
self.deleted_objects[task_container] = set()
self.deleted_objects[task_container].add(task_object)
x = ObjectExpirer(self.conf, swift=self.fake_swift)
deleted_objects = defaultdict(set)
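        # run the expirer once as each of the 3 configured processes and verify
        # that no task object is handled by more than one of them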
for i in range(3):
x.process = i
# reset progress so we know we don't double-up work among processes
x.deleted_objects = defaultdict(set)
x.run_once()
for task_container, deleted in x.deleted_objects.items():
self.assertFalse(deleted_objects[task_container] & deleted)
deleted_objects[task_container] |= deleted
# sort for comparison
deleted_objects = {
con: sorted(o_set) for con, o_set in deleted_objects.items()}
expected = {
self.past_time: [
self.past_time + '-' + target_path
for target_path in self.expired_target_paths[self.past_time]],
self.just_past_time: [
self.just_past_time + '-' + target_path
for target_path
in self.expired_target_paths[self.just_past_time]]}
self.assertEqual(deleted_objects, expected)
def test_delete_object(self):
x = expirer.ObjectExpirer({}, logger=self.logger,
swift=self.fake_swift)
actual_obj = 'actual_obj'
timestamp = int(time())
reclaim_ts = timestamp - x.reclaim_age
account = 'account'
container = 'container'
obj = 'obj'
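        # canned UnexpectedResponse exceptions keyed by status code, used below
        # to drive delete_actual_object failures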
http_exc = {
resp_code:
internal_client.UnexpectedResponse(
str(resp_code), swob.HTTPException(status=resp_code))
for resp_code in {404, 412, 500}
}
exc_other = Exception()
def check_call_to_delete_object(exc, ts, should_pop):
x.logger.clear()
start_reports = x.report_objects
with mock.patch.object(x, 'delete_actual_object',
side_effect=exc) as delete_actual:
with mock.patch.object(x, 'pop_queue') as pop_queue:
x.delete_object(actual_obj, ts, account, container, obj,
False)
delete_actual.assert_called_once_with(actual_obj, ts, False)
log_lines = x.logger.get_lines_for_level('error')
if should_pop:
pop_queue.assert_called_once_with(account, container, obj)
self.assertEqual(start_reports + 1, x.report_objects)
self.assertFalse(log_lines)
else:
self.assertFalse(pop_queue.called)
self.assertEqual(start_reports, x.report_objects)
self.assertEqual(1, len(log_lines))
if isinstance(exc, internal_client.UnexpectedResponse):
self.assertEqual(
log_lines[0],
'Unexpected response while deleting object '
'account container obj: %s' % exc.resp.status_int)
else:
self.assertTrue(log_lines[0].startswith(
'Exception while deleting object '
'account container obj'))
# verify pop_queue logic on exceptions
for exc, ts, should_pop in [(None, timestamp, True),
(http_exc[404], timestamp, False),
(http_exc[412], timestamp, False),
(http_exc[500], reclaim_ts, False),
(exc_other, reclaim_ts, False),
(http_exc[404], reclaim_ts, True),
(http_exc[412], reclaim_ts, True)]:
try:
check_call_to_delete_object(exc, ts, should_pop)
except AssertionError as err:
self.fail("Failed on %r at %f: %s" % (exc, ts, err))
def test_report(self):
x = expirer.ObjectExpirer({}, logger=self.logger,
swift=self.fake_swift)
x.report()
self.assertEqual(x.logger.get_lines_for_level('info'), [])
x.logger._clear()
x.report(final=True)
self.assertTrue(
'completed' in str(x.logger.get_lines_for_level('info')))
self.assertTrue(
'so far' not in str(x.logger.get_lines_for_level('info')))
x.logger._clear()
x.report_last_time = time() - x.report_interval
x.report()
self.assertTrue(
'completed' not in str(x.logger.get_lines_for_level('info')))
self.assertTrue(
'so far' in str(x.logger.get_lines_for_level('info')))
def test_parse_task_obj(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=self.fake_swift)
def assert_parse_task_obj(task_obj, expected_delete_at,
expected_account, expected_container,
expected_obj):
delete_at, account, container, obj = x.parse_task_obj(task_obj)
self.assertEqual(delete_at, expected_delete_at)
self.assertEqual(account, expected_account)
self.assertEqual(container, expected_container)
self.assertEqual(obj, expected_obj)
assert_parse_task_obj('0000-a/c/o', 0, 'a', 'c', 'o')
assert_parse_task_obj('0001-a/c/o', 1, 'a', 'c', 'o')
assert_parse_task_obj('1000-a/c/o', 1000, 'a', 'c', 'o')
assert_parse_task_obj('0000-acc/con/obj', 0, 'acc', 'con', 'obj')
def make_task(self, delete_at, target, is_async_delete=False):
return {
'task_account': '.expiring_objects',
'task_container': delete_at,
'task_object': delete_at + '-' + target,
'delete_timestamp': Timestamp(delete_at),
'target_path': target,
'is_async_delete': is_async_delete,
}
def test_round_robin_order(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=self.fake_swift)
task_con_obj_list = [
# objects in 0000 timestamp container
self.make_task('0000', 'a/c0/o0'),
self.make_task('0000', 'a/c0/o1'),
# objects in 0001 timestamp container
self.make_task('0001', 'a/c1/o0'),
self.make_task('0001', 'a/c1/o1'),
# objects in 0002 timestamp container
self.make_task('0002', 'a/c2/o0'),
self.make_task('0002', 'a/c2/o1'),
]
result = list(x.round_robin_order(task_con_obj_list))
# sorted by popping one object to delete for each target_container
expected = [
self.make_task('0000', 'a/c0/o0'),
self.make_task('0001', 'a/c1/o0'),
self.make_task('0002', 'a/c2/o0'),
self.make_task('0000', 'a/c0/o1'),
self.make_task('0001', 'a/c1/o1'),
self.make_task('0002', 'a/c2/o1'),
]
self.assertEqual(expected, result)
# task containers have some task objects with invalid target paths
task_con_obj_list = [
# objects in 0000 timestamp container
self.make_task('0000', 'invalid0'),
self.make_task('0000', 'a/c0/o0'),
self.make_task('0000', 'a/c0/o1'),
# objects in 0001 timestamp container
self.make_task('0001', 'a/c1/o0'),
self.make_task('0001', 'invalid1'),
self.make_task('0001', 'a/c1/o1'),
# objects in 0002 timestamp container
self.make_task('0002', 'a/c2/o0'),
self.make_task('0002', 'a/c2/o1'),
self.make_task('0002', 'invalid2'),
]
result = list(x.round_robin_order(task_con_obj_list))
# the invalid task objects are ignored
expected = [
self.make_task('0000', 'a/c0/o0'),
self.make_task('0001', 'a/c1/o0'),
self.make_task('0002', 'a/c2/o0'),
self.make_task('0000', 'a/c0/o1'),
self.make_task('0001', 'a/c1/o1'),
self.make_task('0002', 'a/c2/o1'),
]
self.assertEqual(expected, result)
# for a given target container, tasks won't necessarily all go in
# the same timestamp container
task_con_obj_list = [
# objects in 0000 timestamp container
self.make_task('0000', 'a/c0/o0'),
self.make_task('0000', 'a/c0/o1'),
self.make_task('0000', 'a/c2/o2'),
self.make_task('0000', 'a/c2/o3'),
# objects in 0001 timestamp container
self.make_task('0001', 'a/c0/o2'),
self.make_task('0001', 'a/c0/o3'),
self.make_task('0001', 'a/c1/o0'),
self.make_task('0001', 'a/c1/o1'),
# objects in 0002 timestamp container
self.make_task('0002', 'a/c2/o0'),
self.make_task('0002', 'a/c2/o1'),
]
result = list(x.round_robin_order(task_con_obj_list))
# so we go around popping by *target* container, not *task* container
expected = [
self.make_task('0000', 'a/c0/o0'),
self.make_task('0001', 'a/c1/o0'),
self.make_task('0000', 'a/c2/o2'),
self.make_task('0000', 'a/c0/o1'),
self.make_task('0001', 'a/c1/o1'),
self.make_task('0000', 'a/c2/o3'),
self.make_task('0001', 'a/c0/o2'),
self.make_task('0002', 'a/c2/o0'),
self.make_task('0001', 'a/c0/o3'),
self.make_task('0002', 'a/c2/o1'),
]
self.assertEqual(expected, result)
# all of the work to be done could be for different target containers
task_con_obj_list = [
# objects in 0000 timestamp container
self.make_task('0000', 'a/c0/o'),
self.make_task('0000', 'a/c1/o'),
self.make_task('0000', 'a/c2/o'),
self.make_task('0000', 'a/c3/o'),
# objects in 0001 timestamp container
self.make_task('0001', 'a/c4/o'),
self.make_task('0001', 'a/c5/o'),
self.make_task('0001', 'a/c6/o'),
self.make_task('0001', 'a/c7/o'),
# objects in 0002 timestamp container
self.make_task('0002', 'a/c8/o'),
self.make_task('0002', 'a/c9/o'),
]
result = list(x.round_robin_order(task_con_obj_list))
# in which case, we kind of hammer the task containers
self.assertEqual(task_con_obj_list, result)
def test_hash_mod(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=self.fake_swift)
mod_count = [0, 0, 0]
for i in range(1000):
name = 'obj%d' % i
mod = x.hash_mod(name, 3)
mod_count[mod] += 1
# 1000 names are well shuffled
self.assertGreater(mod_count[0], 300)
self.assertGreater(mod_count[1], 300)
self.assertGreater(mod_count[2], 300)
def test_iter_task_accounts_to_expire(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=self.fake_swift)
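        # with no processes/process configured, a single expirer owns the whole
        # task account: (account, my_index=0, divisor=1)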
results = [_ for _ in x.iter_task_accounts_to_expire()]
self.assertEqual(results, [('.expiring_objects', 0, 1)])
self.conf['processes'] = '2'
self.conf['process'] = '1'
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=self.fake_swift)
results = [_ for _ in x.iter_task_accounts_to_expire()]
self.assertEqual(results, [('.expiring_objects', 1, 2)])
def test_delete_at_time_of_task_container(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=self.fake_swift)
self.assertEqual(x.delete_at_time_of_task_container('0000'), 0)
self.assertEqual(x.delete_at_time_of_task_container('0001'), 1)
self.assertEqual(x.delete_at_time_of_task_container('1000'), 1000)
def test_run_once_nothing_to_do(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=self.fake_swift)
x.swift = 'throw error because a string does not have needed methods'
x.run_once()
self.assertEqual(x.logger.get_lines_for_level('error'),
["Unhandled exception: "])
log_args, log_kwargs = x.logger.log_dict['error'][0]
self.assertEqual(str(log_kwargs['exc_info'][1]),
"'str' object has no attribute 'get_account_info'")
def test_run_once_calls_report(self):
with mock.patch.object(self.expirer, 'pop_queue',
lambda a, c, o: None):
self.expirer.run_once()
self.assertEqual(
self.expirer.logger.get_lines_for_level('info'), [
'Pass beginning for task account .expiring_objects; '
'4 possible containers; 12 possible objects',
'Pass completed in 0s; 10 objects expired',
])
def test_run_once_rate_limited(self):
x = expirer.ObjectExpirer(
dict(self.conf, tasks_per_second=2),
logger=self.logger,
swift=self.fake_swift)
x.pop_queue = lambda a, c, o: None
calls = []
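        # stand-in for RateLimitedIterator that records the iterator and rate it
        # was given instead of actually throttling anything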
def fake_ratelimiter(iterator, elements_per_second):
captured_iter = list(iterator)
calls.append((captured_iter, elements_per_second))
return captured_iter
with mock.patch('swift.obj.expirer.RateLimitedIterator',
side_effect=fake_ratelimiter):
x.run_once()
self.assertEqual(calls, [([
self.make_task(self.past_time, target_path)
for target_path in self.expired_target_paths[self.past_time]
] + [
self.make_task(self.just_past_time, target_path)
for target_path in self.expired_target_paths[self.just_past_time]
], 2)])
def test_skip_task_account_without_task_container(self):
fake_swift = FakeInternalClient({
# task account has no containers
'.expiring_objects': dict()
})
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.run_once()
self.assertEqual(
x.logger.get_lines_for_level('info'), [
'Pass completed in 0s; 0 objects expired',
])
def test_iter_task_to_expire(self):
# In this test, all tasks are assigned to the tested expirer
my_index = 0
divisor = 1
# empty container gets deleted inline
task_account_container_list = [('.expiring_objects', self.empty_time)]
with mock.patch.object(self.expirer.swift, 'delete_container') \
as mock_delete_container:
self.assertEqual(
list(self.expirer.iter_task_to_expire(
task_account_container_list, my_index, divisor)),
[])
self.assertEqual(mock_delete_container.mock_calls, [
mock.call('.expiring_objects', self.empty_time,
acceptable_statuses=(2, 404, 409))])
task_account_container_list = [('.expiring_objects', self.past_time)]
expected = [
self.make_task(self.past_time, target_path)
for target_path in self.expired_target_paths[self.past_time]]
with mock.patch.object(self.expirer.swift, 'delete_container') \
as mock_delete_container:
self.assertEqual(
list(self.expirer.iter_task_to_expire(
task_account_container_list, my_index, divisor)),
expected)
# not empty; not deleted
self.assertEqual(mock_delete_container.mock_calls, [])
# the task queue has invalid task object
invalid_aco_dict = deepcopy(self.fake_swift.aco_dict)
invalid_aco_dict['.expiring_objects'][self.past_time].insert(
0, self.past_time + '-invalid0')
invalid_aco_dict['.expiring_objects'][self.past_time].insert(
5, self.past_time + '-invalid1')
invalid_fake_swift = FakeInternalClient(invalid_aco_dict)
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=invalid_fake_swift)
# but the invalid tasks are skipped
self.assertEqual(
list(x.iter_task_to_expire(
task_account_container_list, my_index, divisor)),
expected)
# test some of that async delete
async_delete_aco_dict = {
'.expiring_objects': {
# this task container will be checked
self.past_time: [
# tasks ready for execution
{'name': self.past_time + '-a0/c0/o0',
'content_type': 'application/async-deleted'},
{'name': self.past_time + '-a1/c1/o1',
'content_type': 'application/async-deleted'},
{'name': self.past_time + '-a2/c2/o2',
'content_type': 'application/async-deleted'},
{'name': self.past_time + '-a3/c3/o3',
'content_type': 'application/async-deleted'},
{'name': self.past_time + '-a4/c4/o4',
'content_type': 'application/async-deleted'},
{'name': self.past_time + '-a5/c5/o5',
'content_type': 'application/async-deleted'},
{'name': self.past_time + '-a6/c6/o6',
'content_type': 'application/async-deleted'},
{'name': self.past_time + '-a7/c7/o7',
'content_type': 'application/async-deleted'},
# task objects for unicode test
{'name': self.past_time + u'-a8/c8/o8\u2661',
'content_type': 'application/async-deleted'},
{'name': self.past_time + u'-a9/c9/o9\xf8',
'content_type': 'application/async-deleted'},
]
}
}
async_delete_fake_swift = FakeInternalClient(async_delete_aco_dict)
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=async_delete_fake_swift)
expected = [
self.make_task(self.past_time, target_path,
is_async_delete=True)
for target_path in (
self.expired_target_paths[self.past_time] +
self.expired_target_paths[self.just_past_time])]
self.assertEqual(
list(x.iter_task_to_expire(
task_account_container_list, my_index, divisor)),
expected)
def test_run_once_unicode_problem(self):
requests = []
def capture_requests(ipaddr, port, method, path, *args, **kwargs):
requests.append((method, path))
        # pop_queue issues 3 DELETE requests (one per queue replica) for each of
        # the 10 executed task objects
code_list = [200] * 3 * 10
with mocked_http_conn(*code_list, give_connect=capture_requests):
self.expirer.run_once()
self.assertEqual(len(requests), 30)
def test_container_timestamp_break(self):
with mock.patch.object(self.fake_swift, 'iter_objects') as mock_method:
self.expirer.run_once()
# iter_objects is called only for past_time, not future_time
self.assertEqual(mock_method.call_args_list, [
mock.call('.expiring_objects', self.empty_time),
mock.call('.expiring_objects', self.past_time),
mock.call('.expiring_objects', self.just_past_time)])
def test_object_timestamp_break(self):
with mock.patch.object(self.expirer, 'delete_actual_object') \
as mock_method, \
mock.patch.object(self.expirer, 'pop_queue'):
self.expirer.run_once()
        # only tasks whose delete-at times are already past are executed
self.assertEqual(
mock_method.call_args_list,
[mock.call(target_path, self.past_time, False)
for target_path in self.expired_target_paths[self.past_time]] +
[mock.call(target_path, self.just_past_time, False)
for target_path
in self.expired_target_paths[self.just_past_time]])
def test_failed_delete_keeps_entry(self):
def deliberately_blow_up(actual_obj, timestamp):
raise Exception('failed to delete actual object')
        # no tasks get done
with mock.patch.object(self.expirer, 'delete_actual_object',
deliberately_blow_up), \
mock.patch.object(self.expirer, 'pop_queue') as mock_method:
self.expirer.run_once()
# no tasks are popped from the queue
self.assertEqual(mock_method.call_args_list, [])
# all tasks are done
with mock.patch.object(self.expirer, 'delete_actual_object',
lambda o, t, b: None), \
mock.patch.object(self.expirer, 'pop_queue') as mock_method:
self.expirer.run_once()
# all tasks are popped from the queue
self.assertEqual(
mock_method.call_args_list,
[mock.call('.expiring_objects', self.past_time,
self.past_time + '-' + target_path)
for target_path in self.expired_target_paths[self.past_time]] +
[mock.call('.expiring_objects', self.just_past_time,
self.just_past_time + '-' + target_path)
for target_path
in self.expired_target_paths[self.just_past_time]])
def test_success_gets_counted(self):
self.assertEqual(self.expirer.report_objects, 0)
with mock.patch('swift.obj.expirer.MAX_OBJECTS_TO_CACHE', 0), \
mock.patch.object(self.expirer, 'delete_actual_object',
lambda o, t, b: None), \
mock.patch.object(self.expirer, 'pop_queue',
lambda a, c, o: None):
self.expirer.run_once()
self.assertEqual(self.expirer.report_objects, 10)
def test_delete_actual_object_gets_native_string(self):
got_str = [False]
def delete_actual_object_test_for_string(actual_obj, timestamp,
is_async_delete):
if isinstance(actual_obj, str):
got_str[0] = True
self.assertEqual(self.expirer.report_objects, 0)
with mock.patch.object(self.expirer, 'delete_actual_object',
delete_actual_object_test_for_string), \
mock.patch.object(self.expirer, 'pop_queue',
lambda a, c, o: None):
self.expirer.run_once()
self.assertEqual(self.expirer.report_objects, 10)
self.assertTrue(got_str[0])
def test_failed_delete_continues_on(self):
def fail_delete_container(*a, **kw):
raise Exception('failed to delete container')
def fail_delete_actual_object(actual_obj, timestamp, is_async_delete):
if timestamp == self.just_past_time:
raise Exception('failed to delete actual object')
with mock.patch.object(self.fake_swift, 'delete_container',
fail_delete_container), \
mock.patch.object(self.expirer, 'delete_actual_object',
fail_delete_actual_object), \
mock.patch.object(self.expirer, 'pop_queue') as mock_pop:
self.expirer.run_once()
error_lines = self.expirer.logger.get_lines_for_level('error')
self.assertEqual(error_lines, [
'Exception while deleting container .expiring_objects %s failed '
'to delete container: ' % self.empty_time
] + [
'Exception while deleting object %s %s %s '
'failed to delete actual object: ' % (
'.expiring_objects', self.just_past_time,
self.just_past_time + '-' + target_path)
for target_path in self.expired_target_paths[self.just_past_time]
])
self.assertEqual(self.expirer.logger.get_lines_for_level('info'), [
'Pass beginning for task account .expiring_objects; '
'4 possible containers; 12 possible objects',
'Pass completed in 0s; 5 objects expired',
])
self.assertEqual(mock_pop.mock_calls, [
mock.call('.expiring_objects', self.past_time,
self.past_time + '-' + target_path)
for target_path in self.expired_target_paths[self.past_time]
])
def test_run_forever_initial_sleep_random(self):
global last_not_sleep
def raise_system_exit():
raise SystemExit('test_run_forever')
interval = 1234
x = expirer.ObjectExpirer(
{'__file__': 'unit_test', 'interval': interval},
swift=self.fake_swift)
with mock.patch.object(expirer, 'random', not_random), \
mock.patch.object(expirer, 'sleep', not_sleep), \
self.assertRaises(SystemExit) as caught:
x.run_once = raise_system_exit
x.run_forever()
self.assertEqual(str(caught.exception), 'test_run_forever')
self.assertEqual(last_not_sleep, 0.5 * interval)
def test_run_forever_catches_usual_exceptions(self):
raises = [0]
def raise_exceptions():
raises[0] += 1
if raises[0] < 2:
raise Exception('exception %d' % raises[0])
raise SystemExit('exiting exception %d' % raises[0])
x = expirer.ObjectExpirer({}, logger=self.logger,
swift=self.fake_swift)
orig_sleep = expirer.sleep
try:
expirer.sleep = not_sleep
x.run_once = raise_exceptions
x.run_forever()
except SystemExit as err:
self.assertEqual(str(err), 'exiting exception 2')
finally:
expirer.sleep = orig_sleep
self.assertEqual(x.logger.get_lines_for_level('error'),
['Unhandled exception: '])
log_args, log_kwargs = x.logger.log_dict['error'][0]
self.assertEqual(str(log_kwargs['exc_info'][1]),
'exception 1')
def test_delete_actual_object(self):
got_env = [None]
def fake_app(env, start_response):
got_env[0] = env
start_response('204 No Content', [('Content-Length', '0')])
return []
x = expirer.ObjectExpirer({}, swift=self.make_fake_ic(fake_app))
ts = Timestamp('1234')
x.delete_actual_object('path/to/object', ts, False)
self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'],
got_env[0]['HTTP_X_IF_DELETE_AT'])
self.assertEqual(
got_env[0]['HTTP_X_BACKEND_CLEAN_EXPIRING_OBJECT_QUEUE'], 'no')
def test_delete_actual_object_bulk(self):
got_env = [None]
def fake_app(env, start_response):
got_env[0] = env
start_response('204 No Content', [('Content-Length', '0')])
return []
x = expirer.ObjectExpirer({}, swift=self.make_fake_ic(fake_app))
ts = Timestamp('1234')
x.delete_actual_object('path/to/object', ts, True)
self.assertNotIn('HTTP_X_IF_DELETE_AT', got_env[0])
self.assertNotIn('HTTP_X_BACKEND_CLEAN_EXPIRING_OBJECT_QUEUE',
got_env[0])
self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'], ts.internal)
def test_delete_actual_object_nourlquoting(self):
# delete_actual_object should not do its own url quoting because
# internal client's make_request handles that.
got_env = [None]
def fake_app(env, start_response):
got_env[0] = env
start_response('204 No Content', [('Content-Length', '0')])
return []
x = expirer.ObjectExpirer({}, swift=self.make_fake_ic(fake_app))
ts = Timestamp('1234')
x.delete_actual_object('path/to/object name', ts, False)
self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'],
got_env[0]['HTTP_X_IF_DELETE_AT'])
self.assertEqual(got_env[0]['PATH_INFO'], '/v1/path/to/object name')
def test_delete_actual_object_async_returns_expected_error(self):
def do_test(test_status, should_raise):
calls = [0]
def fake_app(env, start_response):
calls[0] += 1
calls.append(env['PATH_INFO'])
start_response(test_status, [('Content-Length', '0')])
return []
x = expirer.ObjectExpirer({}, swift=self.make_fake_ic(fake_app))
ts = Timestamp('1234')
if should_raise:
with self.assertRaises(internal_client.UnexpectedResponse):
x.delete_actual_object('path/to/object', ts, True)
else:
x.delete_actual_object('path/to/object', ts, True)
self.assertEqual(calls[0], 1, calls)
# object was deleted and tombstone reaped
do_test('404 Not Found', False)
# object was overwritten *after* the original delete, or
# object was deleted but tombstone still exists, or ...
do_test('409 Conflict', False)
# Anything else, raise
do_test('400 Bad Request', True)
def test_delete_actual_object_returns_expected_error(self):
def do_test(test_status, should_raise):
calls = [0]
def fake_app(env, start_response):
calls[0] += 1
start_response(test_status, [('Content-Length', '0')])
return []
x = expirer.ObjectExpirer({}, swift=self.make_fake_ic(fake_app))
ts = Timestamp('1234')
if should_raise:
with self.assertRaises(internal_client.UnexpectedResponse):
x.delete_actual_object('path/to/object', ts, False)
else:
x.delete_actual_object('path/to/object', ts, False)
self.assertEqual(calls[0], 1)
# object was deleted and tombstone reaped
do_test('404 Not Found', True)
# object was overwritten *after* the original expiration, or
do_test('409 Conflict', False)
# object was deleted but tombstone still exists, or
# object was overwritten ahead of the original expiration, or
# object was POSTed to with a new (or no) expiration, or ...
do_test('412 Precondition Failed', True)
def test_delete_actual_object_does_not_handle_odd_stuff(self):
def fake_app(env, start_response):
start_response(
'503 Internal Server Error',
[('Content-Length', '0')])
return []
x = expirer.ObjectExpirer({}, swift=self.make_fake_ic(fake_app))
exc = None
try:
x.delete_actual_object('path/to/object', Timestamp('1234'), False)
except Exception as err:
exc = err
finally:
pass
self.assertEqual(503, exc.resp.status_int)
def test_delete_actual_object_quotes(self):
name = 'this name/should get/quoted'
timestamp = Timestamp('1366063156.863045')
x = expirer.ObjectExpirer({}, swift=self.make_fake_ic(self.fake_swift))
x.swift.make_request = mock.Mock()
x.swift.make_request.return_value.status_int = 204
x.swift.make_request.return_value.app_iter = []
x.delete_actual_object(name, timestamp, False)
self.assertEqual(x.swift.make_request.call_count, 1)
self.assertEqual(x.swift.make_request.call_args[0][1],
'/v1/' + urllib.parse.quote(name))
def test_delete_actual_object_queue_cleaning(self):
name = 'acc/cont/something'
timestamp = Timestamp('1515544858.80602')
x = expirer.ObjectExpirer({}, swift=self.make_fake_ic(self.fake_swift))
x.swift.make_request = mock.MagicMock(
return_value=swob.HTTPNoContent())
x.delete_actual_object(name, timestamp, False)
self.assertEqual(x.swift.make_request.call_count, 1)
header = 'X-Backend-Clean-Expiring-Object-Queue'
self.assertEqual(
x.swift.make_request.call_args[0][2].get(header),
'no')
def test_pop_queue(self):
x = expirer.ObjectExpirer({}, logger=self.logger,
swift=FakeInternalClient({}))
requests = []
def capture_requests(ipaddr, port, method, path, *args, **kwargs):
requests.append((method, path))
with mocked_http_conn(
200, 200, 200, give_connect=capture_requests) as fake_conn:
x.pop_queue('a', 'c', 'o')
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
for method, path in requests:
self.assertEqual(method, 'DELETE')
device, part, account, container, obj = utils.split_path(
path, 5, 5, True)
self.assertEqual(account, 'a')
self.assertEqual(container, 'c')
self.assertEqual(obj, 'o')
def test_build_task_obj_round_trip(self):
ts = next(self.ts)
a = 'a1'
c = 'c2'
o = 'obj1'
args = (ts, a, c, o)
self.assertEqual(args, expirer.parse_task_obj(
expirer.build_task_obj(ts, a, c, o)))
self.assertEqual(args, expirer.parse_task_obj(
expirer.build_task_obj(ts, a, c, o, high_precision=True)))
ts = Timestamp(next(self.ts), delta=1234)
a = u'\N{SNOWMAN}'
c = u'\N{SNOWFLAKE}'
o = u'\U0001F334'
args = (ts, a, c, o)
self.assertNotEqual(args, expirer.parse_task_obj(
expirer.build_task_obj(ts, a, c, o)))
self.assertEqual(args, expirer.parse_task_obj(
expirer.build_task_obj(ts, a, c, o, high_precision=True)))
if __name__ == '__main__':
main()
| swift-master | test/unit/obj/test_expirer.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
import six.moves.cPickle as pickle
from six.moves.queue import PriorityQueue
import mock
import os
import unittest
import random
import itertools
from collections import Counter
from contextlib import closing
from gzip import GzipFile
from tempfile import mkdtemp
from shutil import rmtree
from swift.common.exceptions import ConnectionTimeout
from test import listen_zero
from test.debug_logger import debug_logger
from test.unit import (
make_timestamp_iter, patch_policies, mocked_http_conn)
from time import time
from distutils.dir_util import mkpath
from eventlet import spawn, Timeout
from swift.obj import updater as object_updater
from swift.obj.diskfile import (
ASYNCDIR_BASE, get_async_dir, DiskFileManager, get_tmp_dir)
from swift.common.ring import RingData
from swift.common import utils
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import bytes_to_wsgi
from swift.common.utils import hash_path, normalize_timestamp, mkdirs
from swift.common.storage_policy import StoragePolicy, POLICIES
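# Synchronous stand-in for the updater's ContextPool: spawn() runs the function
# immediately and waitall() is a no-op, keeping the sweep tests deterministic.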
class MockPool(object):
def __init__(self, *a, **kw):
pass
def spawn(self, func, *args, **kwargs):
func(*args, **kwargs)
def waitall(self):
pass
def __enter__(self):
return self
def __exit__(self, *a, **kw):
pass
_mocked_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', True)]
@patch_policies(_mocked_policies)
class TestObjectUpdater(unittest.TestCase):
def setUp(self):
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b''
self.testdir = mkdtemp()
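        # write a small container ring (4 devices, 3 replicas) to swift_dir so
        # the updater has a ring to load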
ring_file = os.path.join(self.testdir, 'container.ring.gz')
with closing(GzipFile(ring_file, 'wb')) as f:
pickle.dump(
RingData([[0, 1, 2, 0, 1, 2],
[1, 2, 0, 1, 2, 0],
[2, 3, 1, 2, 3, 1]],
[{'id': 0, 'ip': '127.0.0.2', 'port': 1,
'replication_ip': '127.0.0.1',
# replication_port may be overridden in tests but
# include here for completeness...
'replication_port': 67890,
'device': 'sda1', 'zone': 0},
{'id': 1, 'ip': '127.0.0.2', 'port': 1,
'replication_ip': '127.0.0.1',
'replication_port': 67890,
'device': 'sda1', 'zone': 2},
{'id': 2, 'ip': '127.0.0.2', 'port': 1,
'replication_ip': '127.0.0.1',
'replication_port': 67890,
'device': 'sda1', 'zone': 4},
{'id': 3, 'ip': '127.0.0.2', 'port': 1,
'replication_ip': '127.0.0.1',
'replication_port': 67890,
'device': 'sda1', 'zone': 6}], 30),
f)
self.devices_dir = os.path.join(self.testdir, 'devices')
os.mkdir(self.devices_dir)
self.sda1 = os.path.join(self.devices_dir, 'sda1')
os.mkdir(self.sda1)
for policy in POLICIES:
os.mkdir(os.path.join(self.sda1, get_tmp_dir(policy)))
self.logger = debug_logger()
self.ts_iter = make_timestamp_iter()
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def test_creation(self):
ou = object_updater.ObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '2',
'node_timeout': '5.5'})
self.assertTrue(hasattr(ou, 'logger'))
self.assertTrue(ou.logger is not None)
self.assertEqual(ou.devices, self.devices_dir)
self.assertEqual(ou.interval, 1)
self.assertEqual(ou.concurrency, 2)
self.assertEqual(ou.node_timeout, 5.5)
self.assertTrue(ou.get_container_ring() is not None)
def test_conf_params(self):
# defaults
daemon = object_updater.ObjectUpdater({}, logger=self.logger)
self.assertEqual(daemon.devices, '/srv/node')
self.assertEqual(daemon.mount_check, True)
self.assertEqual(daemon.swift_dir, '/etc/swift')
self.assertEqual(daemon.interval, 300)
self.assertEqual(daemon.concurrency, 8)
self.assertEqual(daemon.updater_workers, 1)
self.assertEqual(daemon.max_objects_per_second, 50.0)
self.assertEqual(daemon.max_objects_per_container_per_second, 0.0)
self.assertEqual(daemon.per_container_ratelimit_buckets, 1000)
self.assertEqual(daemon.max_deferred_updates, 10000)
# non-defaults
conf = {
'devices': '/some/where/else',
'mount_check': 'huh?',
'swift_dir': '/not/here',
'interval': '600.1',
'concurrency': '2',
'updater_workers': '3',
'objects_per_second': '10.5',
'max_objects_per_container_per_second': '1.2',
'per_container_ratelimit_buckets': '100',
'max_deferred_updates': '0',
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
self.assertEqual(daemon.devices, '/some/where/else')
self.assertEqual(daemon.mount_check, False)
self.assertEqual(daemon.swift_dir, '/not/here')
self.assertEqual(daemon.interval, 600.1)
self.assertEqual(daemon.concurrency, 2)
self.assertEqual(daemon.updater_workers, 3)
self.assertEqual(daemon.max_objects_per_second, 10.5)
self.assertEqual(daemon.max_objects_per_container_per_second, 1.2)
self.assertEqual(daemon.per_container_ratelimit_buckets, 100)
self.assertEqual(daemon.max_deferred_updates, 0)
# check deprecated option
daemon = object_updater.ObjectUpdater({'slowdown': '0.04'},
logger=self.logger)
self.assertEqual(daemon.max_objects_per_second, 20.0)
def check_bad(conf):
with self.assertRaises(ValueError):
object_updater.ObjectUpdater(conf, logger=self.logger)
check_bad({'interval': 'foo'})
check_bad({'concurrency': 'bar'})
check_bad({'concurrency': '1.0'})
check_bad({'slowdown': 'baz'})
check_bad({'objects_per_second': 'quux'})
check_bad({'max_objects_per_container_per_second': '-0.1'})
check_bad({'max_objects_per_container_per_second': 'auto'})
check_bad({'per_container_ratelimit_buckets': '1.2'})
check_bad({'per_container_ratelimit_buckets': '0'})
check_bad({'per_container_ratelimit_buckets': '-1'})
check_bad({'per_container_ratelimit_buckets': 'auto'})
check_bad({'max_deferred_updates': '-1'})
check_bad({'max_deferred_updates': '1.1'})
check_bad({'max_deferred_updates': 'auto'})
@mock.patch('os.listdir')
def test_listdir_with_exception(self, mock_listdir):
e = OSError('permission_denied')
mock_listdir.side_effect = e
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
paths = daemon._listdir('foo/bar')
self.assertEqual([], paths)
log_lines = self.logger.get_lines_for_level('error')
msg = ('ERROR: Unable to access foo/bar: permission_denied')
self.assertEqual(log_lines[0], msg)
@mock.patch('os.listdir', return_value=['foo', 'bar'])
def test_listdir_without_exception(self, mock_listdir):
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
path = daemon._listdir('foo/bar/')
log_lines = self.logger.get_lines_for_level('error')
self.assertEqual(len(log_lines), 0)
self.assertEqual(path, ['foo', 'bar'])
@mock.patch('swift.obj.updater.dump_recon_cache')
def test_object_sweep(self, mock_recon):
def check_with_idx(policy_index, warn, should_skip):
if int(policy_index) > 0:
asyncdir = os.path.join(self.sda1,
ASYNCDIR_BASE + "-" + policy_index)
else:
asyncdir = os.path.join(self.sda1, ASYNCDIR_BASE)
prefix_dir = os.path.join(asyncdir, 'abc')
mkpath(prefix_dir)
            # A non-directory where a directory is expected should just be
# skipped, but should not stop processing of subsequent
# directories.
not_dirs = (
os.path.join(self.sda1, 'not_a_dir'),
os.path.join(self.sda1,
ASYNCDIR_BASE + '-' + 'twentington'),
os.path.join(self.sda1,
ASYNCDIR_BASE + '-' + str(
int(policy_index) + 100)))
for not_dir in not_dirs:
with open(not_dir, 'w'):
pass
objects = {
'a': [1089.3, 18.37, 12.83, 1.3],
'b': [49.4, 49.3, 49.2, 49.1],
'c': [109984.123],
}
expected = set()
for o, timestamps in objects.items():
ohash = hash_path('account', 'container', o)
for t in timestamps:
o_path = os.path.join(prefix_dir, ohash + '-' +
normalize_timestamp(t))
if t == timestamps[0]:
expected.add((o_path, int(policy_index)))
self._write_dummy_pickle(o_path, 'account', 'container', o)
seen = set()
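            # record which async_pendings get processed instead of issuing real
            # container updates; each entry is (update_path, policy_index)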
class MockObjectUpdater(object_updater.ObjectUpdater):
def process_object_update(self, update_path, policy, **kwargs):
seen.add((update_path, int(policy)))
os.unlink(update_path)
ou = MockObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'node_timeout': '5'})
ou.logger = mock_logger = mock.MagicMock()
ou.object_sweep(self.sda1)
self.assertEqual(mock_logger.warning.call_count, warn)
self.assertTrue(
os.path.exists(os.path.join(self.sda1, 'not_a_dir')))
if should_skip:
# if we were supposed to skip over the dir, we didn't process
# anything at all
self.assertEqual(set(), seen)
else:
self.assertEqual(expected, seen)
# test cleanup: the tempdir gets cleaned up between runs, but this
# way we can be called multiple times in a single test method
for not_dir in not_dirs:
os.unlink(not_dir)
# first check with valid policies
for pol in POLICIES:
check_with_idx(str(pol.idx), 0, should_skip=False)
# now check with a bogus async dir policy and make sure we get
# a warning indicating that the '99' policy isn't valid
check_with_idx('99', 1, should_skip=True)
def test_sweep_logs(self):
asyncdir = os.path.join(self.sda1, ASYNCDIR_BASE)
prefix_dir = os.path.join(asyncdir, 'abc')
mkpath(prefix_dir)
for o, t in [('abc', 123), ('def', 234), ('ghi', 345),
('jkl', 456), ('mno', 567)]:
ohash = hash_path('account', 'container', o)
o_path = os.path.join(prefix_dir, ohash + '-' +
normalize_timestamp(t))
self._write_dummy_pickle(o_path, 'account', 'container', o)
class MockObjectUpdater(object_updater.ObjectUpdater):
def process_object_update(self, update_path, **kwargs):
os.unlink(update_path)
self.stats.successes += 1
self.stats.unlinks += 1
logger = debug_logger()
ou = MockObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'report_interval': '10.0',
'node_timeout': '5'}, logger=logger)
now = [time()]
def mock_time_function():
rv = now[0]
now[0] += 4
return rv
# With 10s between updates, time() advancing 4s every time we look,
# and 5 async_pendings on disk, we should get at least two progress
# lines. (time is incremented by 4 each time the update app iter yields
# and each time the elapsed time is sampled)
with mock.patch('swift.obj.updater.time',
mock.MagicMock(time=mock_time_function)), \
mock.patch.object(object_updater, 'ContextPool', MockPool):
ou.object_sweep(self.sda1)
info_lines = logger.get_lines_for_level('info')
self.assertEqual(4, len(info_lines))
self.assertIn("sweep starting", info_lines[0])
self.assertIn(self.sda1, info_lines[0])
self.assertIn("sweep progress", info_lines[1])
# the space ensures it's a positive number
self.assertIn(
"2 successes, 0 failures, 0 quarantines, 2 unlinks, 0 errors, "
"0 redirects",
info_lines[1])
self.assertIn(self.sda1, info_lines[1])
self.assertIn("sweep progress", info_lines[2])
self.assertIn(
"4 successes, 0 failures, 0 quarantines, 4 unlinks, 0 errors, "
"0 redirects",
info_lines[2])
self.assertIn(self.sda1, info_lines[2])
self.assertIn("sweep complete", info_lines[3])
self.assertIn(
"5 successes, 0 failures, 0 quarantines, 5 unlinks, 0 errors, "
"0 redirects",
info_lines[3])
self.assertIn(self.sda1, info_lines[3])
def test_sweep_logs_multiple_policies(self):
for policy in _mocked_policies:
asyncdir = os.path.join(self.sda1, get_async_dir(policy.idx))
prefix_dir = os.path.join(asyncdir, 'abc')
mkpath(prefix_dir)
for o, t in [('abc', 123), ('def', 234), ('ghi', 345)]:
ohash = hash_path('account', 'container%d' % policy.idx, o)
o_path = os.path.join(prefix_dir, ohash + '-' +
normalize_timestamp(t))
self._write_dummy_pickle(o_path, 'account', 'container', o)
class MockObjectUpdater(object_updater.ObjectUpdater):
def process_object_update(self, update_path, **kwargs):
os.unlink(update_path)
self.stats.successes += 1
self.stats.unlinks += 1
logger = debug_logger()
ou = MockObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'report_interval': '10.0',
'node_timeout': '5'}, logger=logger)
now = [time()]
def mock_time():
rv = now[0]
now[0] += 0.01
return rv
with mock.patch('swift.obj.updater.time',
mock.MagicMock(time=mock_time)):
ou.object_sweep(self.sda1)
completion_lines = [l for l in logger.get_lines_for_level('info')
if "sweep complete" in l]
self.assertEqual(len(completion_lines), 1)
self.assertIn("sweep complete", completion_lines[0])
self.assertIn(
"6 successes, 0 failures, 0 quarantines, 6 unlinks, 0 errors, "
"0 redirects",
completion_lines[0])
@mock.patch.object(object_updater, 'check_drive')
def test_run_once_with_disk_unmounted(self, mock_check_drive):
mock_check_drive.side_effect = ValueError
ou = object_updater.ObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'node_timeout': '15'})
ou.run_once()
async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
os.mkdir(async_dir)
ou.run_once()
self.assertTrue(os.path.exists(async_dir))
        # each run calls check_drive
self.assertEqual([
mock.call(self.devices_dir, 'sda1', False),
mock.call(self.devices_dir, 'sda1', False),
], mock_check_drive.mock_calls)
mock_check_drive.reset_mock()
ou = object_updater.ObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'TrUe',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'node_timeout': '15'}, logger=self.logger)
odd_dir = os.path.join(async_dir, 'not really supposed '
'to be here')
os.mkdir(odd_dir)
ou.run_once()
self.assertTrue(os.path.exists(async_dir))
self.assertTrue(os.path.exists(odd_dir)) # skipped - not mounted!
self.assertEqual([
mock.call(self.devices_dir, 'sda1', True),
], mock_check_drive.mock_calls)
self.assertEqual(ou.logger.statsd_client.get_increment_counts(), {})
@mock.patch('swift.obj.updater.dump_recon_cache')
@mock.patch.object(object_updater, 'check_drive')
def test_run_once(self, mock_check_drive, mock_dump_recon):
mock_check_drive.side_effect = lambda r, d, mc: os.path.join(r, d)
ou = object_updater.ObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'node_timeout': '15'}, logger=self.logger)
ou.run_once()
self.assertEqual([], ou.logger.get_lines_for_level('error'))
async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
os.mkdir(async_dir)
ou.run_once()
self.assertTrue(os.path.exists(async_dir))
        # each run calls check_drive
self.assertEqual([
mock.call(self.devices_dir, 'sda1', False),
mock.call(self.devices_dir, 'sda1', False),
], mock_check_drive.mock_calls)
mock_check_drive.reset_mock()
self.assertEqual([], ou.logger.get_lines_for_level('error'))
ou = object_updater.ObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'TrUe',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'node_timeout': '15'}, logger=self.logger)
odd_dir = os.path.join(async_dir, 'not really supposed '
'to be here')
os.mkdir(odd_dir)
ou.run_once()
self.assertTrue(os.path.exists(async_dir))
self.assertEqual([
mock.call(self.devices_dir, 'sda1', True),
], mock_check_drive.mock_calls)
self.assertEqual([], ou.logger.get_lines_for_level('error'))
ohash = hash_path('a', 'c', 'o')
odir = os.path.join(async_dir, ohash[-3:])
mkdirs(odir)
older_op_path = os.path.join(
odir,
'%s-%s' % (ohash, normalize_timestamp(time() - 1)))
op_path = os.path.join(
odir,
'%s-%s' % (ohash, normalize_timestamp(time())))
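        # two async_pendings for the same object: the updater should unlink the
        # older, superseded pending and keep the newer one when its update fails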
for path in (op_path, older_op_path):
with open(path, 'wb') as async_pending:
pickle.dump({'op': 'PUT', 'account': 'a',
'container': 'c',
'obj': 'o', 'headers': {
'X-Container-Timestamp':
normalize_timestamp(0)}},
async_pending)
ou.run_once()
self.assertTrue(not os.path.exists(older_op_path))
self.assertTrue(os.path.exists(op_path))
self.assertEqual(ou.logger.statsd_client.get_increment_counts(),
{'failures': 1, 'unlinks': 1})
self.assertIsNone(pickle.load(open(op_path, 'rb')).get('successes'))
self.assertEqual(
['ERROR with remote server 127.0.0.1:67890/sda1: '
'Connection refused'] * 3,
ou.logger.get_lines_for_level('error'))
self.assertEqual(
sorted(ou.logger.statsd_client.calls['timing']),
sorted([(('updater.timing.status.500', mock.ANY), {}), ] * 3))
ou.logger.clear()
bindsock = listen_zero()
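        # accepter() plays a minimal container server: it reads one PUT request
        # off the socket and answers with the requested status code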
def accepter(sock, return_code):
try:
with Timeout(3):
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write(b'HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
return_code)
out.flush()
self.assertEqual(inc.readline(),
b'PUT /sda1/0/a/c/o HTTP/1.1\r\n')
headers = HeaderKeyDict()
line = bytes_to_wsgi(inc.readline())
while line and line != '\r\n':
headers[line.split(':')[0]] = \
line.split(':')[1].strip()
line = bytes_to_wsgi(inc.readline())
self.assertIn('x-container-timestamp', headers)
self.assertIn('X-Backend-Storage-Policy-Index',
headers)
except BaseException as err:
return err
return None
def accept(return_codes):
try:
events = []
for code in return_codes:
with Timeout(3):
sock, addr = bindsock.accept()
events.append(
spawn(accepter, sock, code))
for event in events:
err = event.wait()
if err:
raise err
except BaseException as err:
return err
return None
        # only 1 of the 3 updates succeeds
event = spawn(accept, [201, 500, 500])
for dev in ou.get_container_ring().devs:
if dev is not None:
dev['replication_port'] = bindsock.getsockname()[1]
ou.logger._clear()
ou.run_once()
err = event.wait()
if err:
raise err
self.assertTrue(os.path.exists(op_path))
self.assertEqual(ou.logger.statsd_client.get_increment_counts(),
{'failures': 1})
self.assertEqual([0],
pickle.load(open(op_path, 'rb')).get('successes'))
self.assertEqual([], ou.logger.get_lines_for_level('error'))
self.assertEqual(
sorted(ou.logger.statsd_client.calls['timing']),
sorted([
(('updater.timing.status.201', mock.ANY), {}),
(('updater.timing.status.500', mock.ANY), {}),
(('updater.timing.status.500', mock.ANY), {}),
]))
        # only 1 of the 2 attempted updates succeeds
event = spawn(accept, [404, 201])
ou.logger.clear()
ou.run_once()
err = event.wait()
if err:
raise err
self.assertTrue(os.path.exists(op_path))
self.assertEqual(ou.logger.statsd_client.get_increment_counts(),
{'failures': 1})
self.assertEqual([0, 2],
pickle.load(open(op_path, 'rb')).get('successes'))
self.assertEqual([], ou.logger.get_lines_for_level('error'))
self.assertEqual(
sorted(ou.logger.statsd_client.calls['timing']),
sorted([
(('updater.timing.status.404', mock.ANY), {}),
(('updater.timing.status.201', mock.ANY), {}),
]))
# final update has Timeout
ou.logger.clear()
with Timeout(99) as exc, \
mock.patch('swift.obj.updater.http_connect') as mock_connect:
mock_connect.return_value.getresponse.side_effect = exc
ou.run_once()
self.assertTrue(os.path.exists(op_path))
self.assertEqual(ou.logger.statsd_client.get_increment_counts(),
{'failures': 1})
self.assertEqual([0, 2],
pickle.load(open(op_path, 'rb')).get('successes'))
self.assertEqual([], ou.logger.get_lines_for_level('error'))
self.assertIn(
'Timeout waiting on remote server 127.0.0.1:%d/sda1: 99 seconds'
% bindsock.getsockname()[1], ou.logger.get_lines_for_level('info'))
self.assertEqual(
sorted(ou.logger.statsd_client.calls['timing']),
sorted([
(('updater.timing.status.499', mock.ANY), {})]))
# final update has ConnectionTimeout
ou.logger.clear()
with ConnectionTimeout(9) as exc, \
mock.patch('swift.obj.updater.http_connect') as mock_connect:
mock_connect.return_value.getresponse.side_effect = exc
ou.run_once()
self.assertTrue(os.path.exists(op_path))
self.assertEqual(ou.logger.statsd_client.get_increment_counts(),
{'failures': 1})
self.assertEqual([0, 2],
pickle.load(open(op_path, 'rb')).get('successes'))
self.assertEqual([], ou.logger.get_lines_for_level('error'))
self.assertIn(
'Timeout connecting to remote server 127.0.0.1:%d/sda1: 9 seconds'
% bindsock.getsockname()[1], ou.logger.get_lines_for_level('info'))
self.assertEqual(
sorted(ou.logger.statsd_client.calls['timing']),
sorted([
(('updater.timing.status.500', mock.ANY), {})
]))
# final update succeeds
event = spawn(accept, [201])
ou.logger.clear()
ou.run_once()
err = event.wait()
if err:
raise err
# we remove the async_pending and its containing suffix dir, but not
# anything above that
self.assertFalse(os.path.exists(op_path))
self.assertFalse(os.path.exists(os.path.dirname(op_path)))
self.assertTrue(os.path.exists(os.path.dirname(os.path.dirname(
op_path))))
self.assertEqual([], ou.logger.get_lines_for_level('error'))
self.assertEqual(ou.logger.statsd_client.get_increment_counts(),
{'unlinks': 1, 'successes': 1})
self.assertEqual(
sorted(ou.logger.statsd_client.calls['timing']),
sorted([
(('updater.timing.status.201', mock.ANY), {}),
]))
def test_obj_put_legacy_updates(self):
ts = (normalize_timestamp(t) for t in
itertools.count(int(time())))
policy = POLICIES.get_by_index(0)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
async_dir = os.path.join(self.sda1, get_async_dir(policy))
os.mkdir(async_dir)
account, container, obj = 'a', 'c', 'o'
# write an async
for op in ('PUT', 'DELETE'):
self.logger.clear()
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
dfmanager = DiskFileManager(conf, daemon.logger)
# don't include storage-policy-index in headers_out pickle
headers_out = HeaderKeyDict({
'x-size': 0,
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-timestamp': next(ts),
})
data = {'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out}
dfmanager.pickle_async_update(self.sda1, account, container, obj,
data, next(ts), policy)
request_log = []
def capture(*args, **kwargs):
request_log.append((args, kwargs))
# run once
fake_status_codes = [200, 200, 200]
with mocked_http_conn(*fake_status_codes, give_connect=capture):
daemon.run_once()
self.assertEqual(len(fake_status_codes), len(request_log))
for request_args, request_kwargs in request_log:
ip, part, method, path, headers, qs, ssl = request_args
self.assertEqual(method, op)
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
str(int(policy)))
self.assertEqual(
daemon.logger.statsd_client.get_increment_counts(),
{'successes': 1, 'unlinks': 1, 'async_pendings': 1})
def _write_async_update(self, dfmanager, timestamp, policy,
headers=None, container_path=None):
# write an async
account, container, obj = 'a', 'c', 'o'
op = 'PUT'
headers_out = headers or {
'x-size': 0,
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-timestamp': timestamp.internal,
'X-Backend-Storage-Policy-Index': int(policy),
'User-Agent': 'object-server %s' % os.getpid()
}
data = {'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out}
if container_path:
data['container_path'] = container_path
dfmanager.pickle_async_update(self.sda1, account, container, obj,
data, timestamp, policy)
def test_obj_put_async_updates(self):
policies = list(POLICIES)
random.shuffle(policies)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
os.mkdir(async_dir)
def do_test(headers_out, expected, container_path=None):
# write an async
dfmanager = DiskFileManager(conf, daemon.logger)
self._write_async_update(dfmanager, next(self.ts_iter),
policies[0], headers=headers_out,
container_path=container_path)
request_log = []
def capture(*args, **kwargs):
request_log.append((args, kwargs))
# run once
fake_status_codes = [
200, # object update success
200, # object update success
                200,  # object update success
]
with mocked_http_conn(*fake_status_codes, give_connect=capture):
daemon.run_once()
self.assertEqual(len(fake_status_codes), len(request_log))
for request_args, request_kwargs in request_log:
ip, part, method, path, headers, qs, ssl = request_args
self.assertEqual(method, 'PUT')
self.assertDictEqual(expected, headers)
self.assertEqual(
daemon.logger.statsd_client.get_increment_counts(),
{'successes': 1, 'unlinks': 1, 'async_pendings': 1})
self.assertFalse(os.listdir(async_dir))
daemon.logger.clear()
ts = next(self.ts_iter)
# use a dict rather than HeaderKeyDict so we can vary the case of the
# pickled headers
headers_out = {
'x-size': 0,
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-timestamp': ts.normal,
'X-Backend-Storage-Policy-Index': int(policies[0]),
'User-Agent': 'object-server %s' % os.getpid()
}
expected = {
'X-Size': '0',
'X-Content-Type': 'text/plain',
'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e',
'X-Timestamp': ts.normal,
'X-Backend-Storage-Policy-Index': str(int(policies[0])),
'User-Agent': 'object-updater %s' % os.getpid(),
'X-Backend-Accept-Redirect': 'true',
'X-Backend-Accept-Quoted-Location': 'true',
}
# always expect X-Backend-Accept-Redirect and
# X-Backend-Accept-Quoted-Location to be true
do_test(headers_out, expected, container_path='.shards_a/shard_c')
do_test(headers_out, expected)
# ...unless they're already set
expected['X-Backend-Accept-Redirect'] = 'false'
expected['X-Backend-Accept-Quoted-Location'] = 'false'
headers_out_2 = dict(headers_out)
headers_out_2['X-Backend-Accept-Redirect'] = 'false'
headers_out_2['X-Backend-Accept-Quoted-Location'] = 'false'
do_test(headers_out_2, expected)
# updater should add policy header if missing
expected['X-Backend-Accept-Redirect'] = 'true'
expected['X-Backend-Accept-Quoted-Location'] = 'true'
headers_out['X-Backend-Storage-Policy-Index'] = None
do_test(headers_out, expected)
# updater should not overwrite a mismatched policy header
headers_out['X-Backend-Storage-Policy-Index'] = int(policies[1])
expected['X-Backend-Storage-Policy-Index'] = str(int(policies[1]))
do_test(headers_out, expected)
# check for case insensitivity
headers_out['user-agent'] = headers_out.pop('User-Agent')
headers_out['x-backend-storage-policy-index'] = headers_out.pop(
'X-Backend-Storage-Policy-Index')
do_test(headers_out, expected)
def _check_update_requests(self, requests, timestamp, policy):
# do some sanity checks on update request
expected_headers = {
'X-Size': '0',
'X-Content-Type': 'text/plain',
'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e',
'X-Timestamp': timestamp.internal,
'X-Backend-Storage-Policy-Index': str(int(policy)),
'User-Agent': 'object-updater %s' % os.getpid(),
'X-Backend-Accept-Redirect': 'true',
'X-Backend-Accept-Quoted-Location': 'true'}
for request in requests:
self.assertEqual('PUT', request['method'])
self.assertDictEqual(expected_headers, request['headers'])
def test_obj_put_async_root_update_redirected(self):
policies = list(POLICIES)
random.shuffle(policies)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
os.mkdir(async_dir)
dfmanager = DiskFileManager(conf, daemon.logger)
ts_obj = next(self.ts_iter)
self._write_async_update(dfmanager, ts_obj, policies[0])
# run once
ts_redirect_1 = next(self.ts_iter)
ts_redirect_2 = next(self.ts_iter)
fake_responses = [
# first round of update attempts, newest redirect should be chosen
(200, {}),
(301, {'Location': '/.shards_a/c_shard_new/o',
'X-Backend-Redirect-Timestamp': ts_redirect_2.internal}),
(301, {'Location': '/.shards_a/c_shard_old/o',
'X-Backend-Redirect-Timestamp': ts_redirect_1.internal}),
# second round of update attempts
(200, {}),
(200, {}),
(200, {}),
]
fake_status_codes, fake_headers = zip(*fake_responses)
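# mocked_http_conn replays these status/header pairs, one per backend
# request, in the order given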
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests[:3], ts_obj, policies[0])
self._check_update_requests(conn.requests[3:], ts_obj, policies[0])
self.assertEqual(['/sda1/0/a/c/o'] * 3 +
['/sda1/0/.shards_a/c_shard_new/o'] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 1, 'successes': 1,
'unlinks': 1, 'async_pendings': 1},
daemon.logger.statsd_client.get_increment_counts())
self.assertFalse(os.listdir(async_dir)) # no async file
def test_obj_put_async_root_update_redirected_previous_success(self):
policies = list(POLICIES)
random.shuffle(policies)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
os.mkdir(async_dir)
dfmanager = DiskFileManager(conf, daemon.logger)
ts_obj = next(self.ts_iter)
self._write_async_update(dfmanager, ts_obj, policies[0])
orig_async_path, orig_async_data = self._check_async_file(async_dir)
# run once
with mocked_http_conn(
507, 200, 507) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests, ts_obj, policies[0])
self.assertEqual(['/sda1/0/a/c/o'] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'failures': 1, 'async_pendings': 1},
daemon.logger.statsd_client.get_increment_counts())
async_path, async_data = self._check_async_file(async_dir)
self.assertEqual(dict(orig_async_data, successes=[1]), async_data)
# run again - expect 3 redirected updates despite previous success
ts_redirect = next(self.ts_iter)
resp_headers_1 = {'Location': '/.shards_a/c_shard_1/o',
'X-Backend-Redirect-Timestamp': ts_redirect.internal}
fake_responses = (
# 1st round of redirects, 2nd round of redirects
[(301, resp_headers_1)] * 2 + [(200, {})] * 3)
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests[:2], ts_obj, policies[0])
self._check_update_requests(conn.requests[2:], ts_obj, policies[0])
root_part = daemon.container_ring.get_part('a/c')
shard_1_part = daemon.container_ring.get_part('.shards_a/c_shard_1')
self.assertEqual(
['/sda1/%s/a/c/o' % root_part] * 2 +
['/sda1/%s/.shards_a/c_shard_1/o' % shard_1_part] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 1, 'successes': 1, 'failures': 1, 'unlinks': 1,
'async_pendings': 1},
daemon.logger.statsd_client.get_increment_counts())
self.assertFalse(os.listdir(async_dir)) # no async file
def _check_async_file(self, async_dir):
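# helper: assert exactly one async pending file exists and return its
# path and unpickled contents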
async_subdirs = os.listdir(async_dir)
self.assertEqual([mock.ANY], async_subdirs)
async_files = os.listdir(os.path.join(async_dir, async_subdirs[0]))
self.assertEqual([mock.ANY], async_files)
async_path = os.path.join(
async_dir, async_subdirs[0], async_files[0])
with open(async_path, 'rb') as fd:
async_data = pickle.load(fd)
return async_path, async_data
def _check_obj_put_async_update_bad_redirect_headers(self, headers):
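# helper: redirect responses with unusable headers are treated as a
# failure and the async pending is left on disk for a later retry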
policies = list(POLICIES)
random.shuffle(policies)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
os.mkdir(async_dir)
dfmanager = DiskFileManager(conf, daemon.logger)
ts_obj = next(self.ts_iter)
self._write_async_update(dfmanager, ts_obj, policies[0])
orig_async_path, orig_async_data = self._check_async_file(async_dir)
fake_responses = [
(301, headers),
(301, headers),
(301, headers),
]
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests, ts_obj, policies[0])
self.assertEqual(['/sda1/0/a/c/o'] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'failures': 1, 'async_pendings': 1},
daemon.logger.statsd_client.get_increment_counts())
# async file still intact
async_path, async_data = self._check_async_file(async_dir)
self.assertEqual(orig_async_path, async_path)
self.assertEqual(orig_async_data, async_data)
return daemon
def test_obj_put_async_root_update_missing_location_header(self):
headers = {
'X-Backend-Redirect-Timestamp': next(self.ts_iter).internal}
self._check_obj_put_async_update_bad_redirect_headers(headers)
def test_obj_put_async_root_update_bad_location_header(self):
headers = {
'Location': 'bad bad bad',
'X-Backend-Redirect-Timestamp': next(self.ts_iter).internal}
daemon = self._check_obj_put_async_update_bad_redirect_headers(headers)
error_lines = daemon.logger.get_lines_for_level('error')
self.assertIn('Container update failed', error_lines[0])
self.assertIn('Invalid path: bad%20bad%20bad', error_lines[0])
def test_obj_put_async_shard_update_redirected_twice(self):
policies = list(POLICIES)
random.shuffle(policies)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
os.mkdir(async_dir)
dfmanager = DiskFileManager(conf, daemon.logger)
ts_obj = next(self.ts_iter)
self._write_async_update(dfmanager, ts_obj, policies[0],
container_path='.shards_a/c_shard_older')
orig_async_path, orig_async_data = self._check_async_file(async_dir)
# run once
ts_redirect_1 = next(self.ts_iter)
ts_redirect_2 = next(self.ts_iter)
ts_redirect_3 = next(self.ts_iter)
fake_responses = [
# 1st round of redirects, newest redirect should be chosen
(301, {'Location': '/.shards_a/c_shard_old/o',
'X-Backend-Redirect-Timestamp': ts_redirect_1.internal}),
(301, {'Location': '/.shards_a/c%5Fshard%5Fnew/o',
'X-Backend-Location-Is-Quoted': 'true',
'X-Backend-Redirect-Timestamp': ts_redirect_2.internal}),
(301, {'Location': '/.shards_a/c%5Fshard%5Fold/o',
'X-Backend-Location-Is-Quoted': 'true',
'X-Backend-Redirect-Timestamp': ts_redirect_1.internal}),
# 2nd round of redirects
(301, {'Location': '/.shards_a/c_shard_newer/o',
'X-Backend-Redirect-Timestamp': ts_redirect_3.internal}),
(301, {'Location': '/.shards_a/c_shard_newer/o',
'X-Backend-Redirect-Timestamp': ts_redirect_3.internal}),
(301, {'Location': '/.shards_a/c_shard_newer/o',
'X-Backend-Redirect-Timestamp': ts_redirect_3.internal}),
]
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests, ts_obj, policies[0])
# only *one* set of redirected requests is attempted per cycle
older_part = daemon.container_ring.get_part('.shards_a/c_shard_older')
new_part = daemon.container_ring.get_part('.shards_a/c_shard_new')
newer_part = daemon.container_ring.get_part('.shards_a/c_shard_newer')
self.assertEqual(
['/sda1/%s/.shards_a/c_shard_older/o' % older_part] * 3 +
['/sda1/%s/.shards_a/c_shard_new/o' % new_part] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 2, 'async_pendings': 1},
daemon.logger.statsd_client.get_increment_counts())
# update failed, we still have pending file with most recent redirect
# response Location header value added to data
async_path, async_data = self._check_async_file(async_dir)
self.assertEqual(orig_async_path, async_path)
self.assertEqual(
dict(orig_async_data, container_path='.shards_a/c_shard_newer',
redirect_history=['.shards_a/c_shard_new',
'.shards_a/c_shard_newer']),
async_data)
# next cycle, should get latest redirect from pickled async update
fake_responses = [(200, {})] * 3
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests, ts_obj, policies[0])
self.assertEqual(
['/sda1/%s/.shards_a/c_shard_newer/o' % newer_part] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 2, 'successes': 1, 'unlinks': 1,
'async_pendings': 1},
daemon.logger.statsd_client.get_increment_counts())
self.assertFalse(os.listdir(async_dir)) # no async file
def test_obj_put_async_update_redirection_loop(self):
policies = list(POLICIES)
random.shuffle(policies)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
os.mkdir(async_dir)
dfmanager = DiskFileManager(conf, daemon.logger)
ts_obj = next(self.ts_iter)
self._write_async_update(dfmanager, ts_obj, policies[0])
orig_async_path, orig_async_data = self._check_async_file(async_dir)
# run once
ts_redirect = next(self.ts_iter)
resp_headers_1 = {'Location': '/.shards_a/c_shard_1/o',
'X-Backend-Redirect-Timestamp': ts_redirect.internal}
resp_headers_2 = {'Location': '/.shards_a/c_shard_2/o',
'X-Backend-Redirect-Timestamp': ts_redirect.internal}
fake_responses = (
# 1st round of redirects, 2nd round of redirects
[(301, resp_headers_1)] * 3 + [(301, resp_headers_2)] * 3)
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests[:3], ts_obj, policies[0])
self._check_update_requests(conn.requests[3:], ts_obj, policies[0])
# only *one* set of redirected requests is attempted per cycle
root_part = daemon.container_ring.get_part('a/c')
shard_1_part = daemon.container_ring.get_part('.shards_a/c_shard_1')
shard_2_part = daemon.container_ring.get_part('.shards_a/c_shard_2')
shard_3_part = daemon.container_ring.get_part('.shards_a/c_shard_3')
self.assertEqual(['/sda1/%s/a/c/o' % root_part] * 3 +
['/sda1/%s/.shards_a/c_shard_1/o' % shard_1_part] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 2, 'async_pendings': 1},
daemon.logger.statsd_client.get_increment_counts())
# update failed, we still have pending file with most recent redirect
# response Location header value added to data
async_path, async_data = self._check_async_file(async_dir)
self.assertEqual(orig_async_path, async_path)
self.assertEqual(
dict(orig_async_data, container_path='.shards_a/c_shard_2',
redirect_history=['.shards_a/c_shard_1',
'.shards_a/c_shard_2']),
async_data)
# next cycle, more redirects! first is to previously visited location
resp_headers_3 = {'Location': '/.shards_a/c_shard_3/o',
'X-Backend-Redirect-Timestamp': ts_redirect.internal}
fake_responses = (
# 1st round of redirects, 2nd round of redirects
[(301, resp_headers_1)] * 3 + [(301, resp_headers_3)] * 3)
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests[:3], ts_obj, policies[0])
self._check_update_requests(conn.requests[3:], ts_obj, policies[0])
# first try the previously persisted container path; the response to
# that would create a loop, so ignore it and send to root
self.assertEqual(
['/sda1/%s/.shards_a/c_shard_2/o' % shard_2_part] * 3 +
['/sda1/%s/a/c/o' % root_part] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 4, 'async_pendings': 1},
daemon.logger.statsd_client.get_increment_counts())
# update failed, we still have pending file with most recent redirect
# response Location header value from root added to persisted data
async_path, async_data = self._check_async_file(async_dir)
self.assertEqual(orig_async_path, async_path)
# note: redirect_history was reset when falling back to root
self.assertEqual(
dict(orig_async_data, container_path='.shards_a/c_shard_3',
redirect_history=['.shards_a/c_shard_3']),
async_data)
# next cycle, more redirects! first is to a location visited previously
# but not since last fall back to root, so that location IS tried;
# second is to a location visited since last fall back to root so that
# location is NOT tried
fake_responses = (
# 1st round of redirects, 2nd round of redirects
[(301, resp_headers_1)] * 3 + [(301, resp_headers_3)] * 3)
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests, ts_obj, policies[0])
self.assertEqual(
['/sda1/%s/.shards_a/c_shard_3/o' % shard_3_part] * 3 +
['/sda1/%s/.shards_a/c_shard_1/o' % shard_1_part] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 6, 'async_pendings': 1},
daemon.logger.statsd_client.get_increment_counts())
# update failed, we still have pending file, but container_path is None
# because most recent redirect location was a repeat
async_path, async_data = self._check_async_file(async_dir)
self.assertEqual(orig_async_path, async_path)
self.assertEqual(
dict(orig_async_data, container_path=None,
redirect_history=[]),
async_data)
# next cycle, persisted container path is None so update should go to
# root, this time it succeeds
fake_responses = [(200, {})] * 3
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests, ts_obj, policies[0])
self.assertEqual(['/sda1/%s/a/c/o' % root_part] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 6, 'successes': 1, 'unlinks': 1,
'async_pendings': 1},
daemon.logger.statsd_client.get_increment_counts())
self.assertFalse(os.listdir(async_dir)) # no async file
def test_obj_update_quarantine(self):
policies = list(POLICIES)
random.shuffle(policies)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
os.mkdir(async_dir)
ohash = hash_path('a', 'c', 'o')
odir = os.path.join(async_dir, ohash[-3:])
mkdirs(odir)
op_path = os.path.join(
odir,
'%s-%s' % (ohash, next(self.ts_iter).internal))
with open(op_path, 'wb') as async_pending:
async_pending.write(b'\xff') # invalid pickle
with mocked_http_conn():
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self.assertEqual(
{'quarantines': 1},
daemon.logger.statsd_client.get_increment_counts())
self.assertFalse(os.listdir(async_dir)) # no asyncs
def test_obj_update_gone_missing(self):
# if you've got multiple updaters running (say, both a background
# and foreground process), _load_update may get a file
# that doesn't exist
policies = list(POLICIES)
random.shuffle(policies)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
os.mkdir(async_dir)
ohash = hash_path('a', 'c', 'o')
odir = os.path.join(async_dir, ohash[-3:])
mkdirs(odir)
op_path = os.path.join(
odir,
'%s-%s' % (ohash, next(self.ts_iter).internal))
self.assertEqual(os.listdir(async_dir), [ohash[-3:]])
self.assertFalse(os.listdir(odir))
with mocked_http_conn():
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon._load_update(self.sda1, op_path)
self.assertEqual(
{}, daemon.logger.statsd_client.get_increment_counts())
self.assertEqual(os.listdir(async_dir), [ohash[-3:]])
self.assertFalse(os.listdir(odir))
def _write_dummy_pickle(self, path, a, c, o, cp=None):
update = {
'op': 'PUT',
'account': a,
'container': c,
'obj': o,
'headers': {'X-Container-Timestamp': normalize_timestamp(0)}
}
if cp:
update['container_path'] = cp
with open(path, 'wb') as async_pending:
pickle.dump(update, async_pending)
def _make_async_pending_pickle(self, a, c, o, cp=None):
ohash = hash_path(a, c, o)
odir = os.path.join(self.async_dir, ohash[-3:])
mkdirs(odir)
path = os.path.join(
odir,
'%s-%s' % (ohash, normalize_timestamp(time())))
self._write_dummy_pickle(path, a, c, o, cp)
def _find_async_pending_files(self):
found_files = []
for root, dirs, files in os.walk(self.async_dir):
found_files.extend(files)
return found_files
@mock.patch('swift.obj.updater.dump_recon_cache')
def test_per_container_rate_limit(self, mock_recon):
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'max_objects_per_container_per_second': 1,
'max_deferred_updates': 0, # do not re-iterate
'concurrency': 1
}
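# at 1 update/s per container and no deferral queue, only the first
# update seen for each container in this cycle can succeed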
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
self.async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
os.mkdir(self.async_dir)
num_c1_files = 10
for i in range(num_c1_files):
obj_name = 'o%02d' % i
self._make_async_pending_pickle('a', 'c1', obj_name)
c1_part, _ = daemon.get_container_ring().get_nodes('a', 'c1')
# make one more in a different container, with a container_path
self._make_async_pending_pickle('a', 'c2', obj_name,
cp='.shards_a/c2_shard')
c2_part, _ = daemon.get_container_ring().get_nodes('.shards_a',
'c2_shard')
expected_total = num_c1_files + 1
self.assertEqual(expected_total,
len(self._find_async_pending_files()))
expected_success = 2
fake_status_codes = [200] * 3 * expected_success
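# each successful update is sent to all 3 container replicas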
with mocked_http_conn(*fake_status_codes) as fake_conn:
daemon.run_once()
self.assertEqual(expected_success, daemon.stats.successes)
expected_skipped = expected_total - expected_success
self.assertEqual(expected_skipped, daemon.stats.skips)
self.assertEqual(expected_skipped,
len(self._find_async_pending_files()))
self.assertEqual(
Counter(
'/'.join(req['path'].split('/')[:5])
for req in fake_conn.requests),
{'/sda1/%s/a/c1' % c1_part: 3,
'/sda1/%s/.shards_a/c2_shard' % c2_part: 3})
info_lines = self.logger.get_lines_for_level('info')
self.assertTrue(info_lines)
self.assertIn('2 successes, 0 failures, 0 quarantines, 2 unlinks, '
'0 errors, 0 redirects, 9 skips, 9 deferrals, 0 drains',
info_lines[-1])
self.assertEqual({'skips': 9, 'successes': 2, 'unlinks': 2,
'deferrals': 9},
self.logger.statsd_client.get_increment_counts())
@mock.patch('swift.obj.updater.dump_recon_cache')
def test_per_container_rate_limit_unlimited(self, mock_recon):
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'max_objects_per_container_per_second': 0,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
self.async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
os.mkdir(self.async_dir)
num_c1_files = 10
for i in range(num_c1_files):
obj_name = 'o%02d' % i
self._make_async_pending_pickle('a', 'c1', obj_name)
c1_part, _ = daemon.get_container_ring().get_nodes('a', 'c1')
# make one more in a different container, with a container_path
self._make_async_pending_pickle('a', 'c2', obj_name,
cp='.shards_a/c2_shard')
c2_part, _ = daemon.get_container_ring().get_nodes('.shards_a',
'c2_shard')
expected_total = num_c1_files + 1
self.assertEqual(expected_total,
len(self._find_async_pending_files()))
fake_status_codes = [200] * 3 * expected_total
with mocked_http_conn(*fake_status_codes):
daemon.run_once()
self.assertEqual(expected_total, daemon.stats.successes)
self.assertEqual(0, daemon.stats.skips)
self.assertEqual([], self._find_async_pending_files())
info_lines = self.logger.get_lines_for_level('info')
self.assertTrue(info_lines)
self.assertIn('11 successes, 0 failures, 0 quarantines, 11 unlinks, '
'0 errors, 0 redirects, 0 skips, 0 deferrals, 0 drains',
info_lines[-1])
self.assertEqual({'successes': 11, 'unlinks': 11},
self.logger.statsd_client.get_increment_counts())
@mock.patch('swift.obj.updater.dump_recon_cache')
def test_per_container_rate_limit_some_limited(self, mock_recon):
# simulate delays between successive updates being fed to the limiter
# so that only some updates are skipped
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'max_objects_per_container_per_second': 10,
'max_deferred_updates': 0, # do not re-iterate
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
self.async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
os.mkdir(self.async_dir)
# all updates for same container
num_c1_files = 4
for i in range(num_c1_files):
obj_name = 'o%02d' % i
self._make_async_pending_pickle('a', 'c1', obj_name)
c1_part, _ = daemon.get_container_ring().get_nodes('a', 'c1')
expected_total = num_c1_files
self.assertEqual(expected_total,
len(self._find_async_pending_files()))
# the first update always succeeds; the second is skipped because it
# arrives only 0.05s behind the first; the third succeeds because it is
# 0.101s behind the first; the fourth is skipped
latencies = [0, 0.05, .051, 0]
expected_success = 2
fake_status_codes = [200] * 3 * expected_success
contexts_fed_in = []
def ratelimit_if(value):
contexts_fed_in.append(value)
# sleep so time passes before the iterator is asked for the next update
eventlet.sleep(latencies.pop(0))
return False # returning False overrides normal ratelimiting
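# returning False from the hook disables RateLimitedIterator's own
# limiting; the sleep above just makes real time pass between updates so
# only the per-container bucket limiter decides what is skipped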
orig_rate_limited_iterator = utils.RateLimitedIterator
def fake_rate_limited_iterator(*args, **kwargs):
# insert our own rate limiting function
kwargs['ratelimit_if'] = ratelimit_if
return orig_rate_limited_iterator(*args, **kwargs)
with mocked_http_conn(*fake_status_codes) as fake_conn, \
mock.patch('swift.obj.updater.RateLimitedIterator',
fake_rate_limited_iterator):
daemon.run_once()
self.assertEqual(expected_success, daemon.stats.successes)
expected_skipped = expected_total - expected_success
self.assertEqual(expected_skipped, daemon.stats.skips)
self.assertEqual(expected_skipped,
len(self._find_async_pending_files()))
paths_fed_in = ['/sda1/%(part)s/%(account)s/%(container)s/%(obj)s'
% dict(ctx['update'], part=c1_part)
for ctx in contexts_fed_in]
expected_update_paths = paths_fed_in[:1] * 3 + paths_fed_in[2:3] * 3
actual_update_paths = [req['path'] for req in fake_conn.requests]
self.assertEqual(expected_update_paths, actual_update_paths)
info_lines = self.logger.get_lines_for_level('info')
self.assertTrue(info_lines)
self.assertIn('2 successes, 0 failures, 0 quarantines, 2 unlinks, '
'0 errors, 0 redirects, 2 skips, 2 deferrals, 0 drains',
info_lines[-1])
self.assertEqual({'skips': 2, 'successes': 2, 'unlinks': 2,
'deferrals': 2},
self.logger.statsd_client.get_increment_counts())
@mock.patch('swift.obj.updater.dump_recon_cache')
def test_per_container_rate_limit_defer_2_skip_1(self, mock_recon):
# limit length of deferral queue so that some defer and some skip
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'max_objects_per_container_per_second': 10,
# only one bucket needed for test
'per_container_ratelimit_buckets': 1,
'max_deferred_updates': 1,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
self.async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
os.mkdir(self.async_dir)
# all updates for same container
num_c1_files = 4
for i in range(num_c1_files):
obj_name = 'o%02d' % i
self._make_async_pending_pickle('a', 'c1', obj_name)
c1_part, _ = daemon.get_container_ring().get_nodes('a', 'c1')
expected_total = num_c1_files
self.assertEqual(expected_total,
len(self._find_async_pending_files()))
# first succeeds, second is deferred, third succeeds, fourth is
# deferred and bumps second out of deferral queue, fourth is re-tried
latencies = [0, 0.05, .051, 0, 0, .11]
expected_success = 3
contexts_fed_in = []
captured_queues = []
captured_skips_stats = []
def ratelimit_if(value):
contexts_fed_in.append(value)
return False # returning False overrides normal ratelimiting
orig_rate_limited_iterator = utils.RateLimitedIterator
def fake_rate_limited_iterator(*args, **kwargs):
# insert our own rate limiting function
kwargs['ratelimit_if'] = ratelimit_if
return orig_rate_limited_iterator(*args, **kwargs)
now = [time()]
def fake_get_time(bucket_iter):
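# hook for the limiter's clock: record current skip stats and the
# bucket's deferral queue on every call, then advance the simulated
# time by the next latency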
captured_skips_stats.append(
daemon.logger.statsd_client.get_increment_counts().get(
'skips', 0))
captured_queues.append(list(bucket_iter.buckets[0].deque))
# advance the simulated clock before the next update is fetched
now[0] += latencies.pop(0)
return now[0]
captured_updates = []
def fake_object_update(node, part, op, obj, *args, **kwargs):
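# stand-in for ObjectUpdater.object_update: record the call and report
# success for every node with no redirect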
captured_updates.append((node, part, op, obj))
return True, node['id'], False
with mock.patch(
'swift.obj.updater.BucketizedUpdateSkippingLimiter._get_time',
fake_get_time), \
mock.patch.object(daemon, 'object_update',
fake_object_update), \
mock.patch('swift.obj.updater.RateLimitedIterator',
fake_rate_limited_iterator):
daemon.run_once()
self.assertEqual(expected_success, daemon.stats.successes)
expected_skipped = expected_total - expected_success
self.assertEqual(expected_skipped, daemon.stats.skips)
self.assertEqual(expected_skipped,
len(self._find_async_pending_files()))
orig_iteration = contexts_fed_in[:num_c1_files]
# we first capture every async fed in one by one
objs_fed_in = [ctx['update']['obj'] for ctx in orig_iteration]
self.assertEqual(num_c1_files, len(set(objs_fed_in)))
# keep track of this order for context
aorder = {ctx['update']['obj']: 'a%02d' % i
for i, ctx in enumerate(orig_iteration)}
expected_drops = (1,)
expected_updates_sent = []
for i, obj in enumerate(objs_fed_in):
if i in expected_drops:
continue
# triple replica, request to 3 nodes each obj!
expected_updates_sent.extend([obj] * 3)
actual_updates_sent = [
utils.split_path(update[3], minsegs=3)[-1]
for update in captured_updates
]
self.assertEqual([aorder[o] for o in expected_updates_sent],
[aorder[o] for o in actual_updates_sent])
self.assertEqual([0, 0, 0, 0, 1], captured_skips_stats)
expected_deferrals = [
[],
[],
[objs_fed_in[1]],
[objs_fed_in[1]],
[objs_fed_in[3]],
]
self.assertEqual(
expected_deferrals,
[[ctx['update']['obj'] for ctx in q] for q in captured_queues])
info_lines = self.logger.get_lines_for_level('info')
self.assertTrue(info_lines)
self.assertIn('3 successes, 0 failures, 0 quarantines, 3 unlinks, '
'0 errors, 0 redirects, 1 skips, 2 deferrals, 1 drains',
info_lines[-1])
self.assertEqual(
{'skips': 1, 'successes': 3, 'unlinks': 3, 'deferrals': 2,
'drains': 1}, self.logger.statsd_client.get_increment_counts())
@mock.patch('swift.obj.updater.dump_recon_cache')
def test_per_container_rate_limit_defer_3_skip_1(self, mock_recon):
# limit length of deferral queue so that some defer and some skip
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'max_objects_per_container_per_second': 10,
# only one bucket needed for test
'per_container_ratelimit_buckets': 1,
'max_deferred_updates': 2,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
self.async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
os.mkdir(self.async_dir)
# all updates for same container
num_c1_files = 5
for i in range(num_c1_files):
obj_name = 'o%02d' % i
self._make_async_pending_pickle('a', 'c1', obj_name)
c1_part, _ = daemon.get_container_ring().get_nodes('a', 'c1')
expected_total = num_c1_files
self.assertEqual(expected_total,
len(self._find_async_pending_files()))
# indexes 0, 2 succeed; 1, 3, 4 deferred but 1 is bumped from deferral
# queue by 4; 4, 3 are then drained
latencies = [0, 0.05, .051, 0, 0, 0, .11]
expected_success = 4
contexts_fed_in = []
captured_queues = []
captured_skips_stats = []
def ratelimit_if(value):
contexts_fed_in.append(value)
return False # returning False overrides normal ratelimiting
orig_rate_limited_iterator = utils.RateLimitedIterator
def fake_rate_limited_iterator(*args, **kwargs):
# insert our own rate limiting function
kwargs['ratelimit_if'] = ratelimit_if
return orig_rate_limited_iterator(*args, **kwargs)
now = [time()]
def fake_get_time(bucket_iter):
captured_skips_stats.append(
daemon.logger.statsd_client.get_increment_counts().get(
'skips', 0))
captured_queues.append(list(bucket_iter.buckets[0].deque))
# advance the simulated clock before the next update is fetched
now[0] += latencies.pop(0)
return now[0]
captured_updates = []
def fake_object_update(node, part, op, obj, *args, **kwargs):
captured_updates.append((node, part, op, obj))
return True, node['id'], False
with mock.patch(
'swift.obj.updater.BucketizedUpdateSkippingLimiter._get_time',
fake_get_time), \
mock.patch.object(daemon, 'object_update',
fake_object_update), \
mock.patch('swift.obj.updater.RateLimitedIterator',
fake_rate_limited_iterator), \
mock.patch('swift.common.utils.eventlet.sleep') as mock_sleep:
daemon.run_once()
self.assertEqual(expected_success, daemon.stats.successes)
expected_skipped = expected_total - expected_success
self.assertEqual(expected_skipped, daemon.stats.skips)
self.assertEqual(expected_skipped,
len(self._find_async_pending_files()))
orig_iteration = contexts_fed_in[:num_c1_files]
# we first capture every async fed in one by one
objs_fed_in = [ctx['update']['obj'] for ctx in orig_iteration]
self.assertEqual(num_c1_files, len(set(objs_fed_in)))
# keep track of this order for context
aorder = {ctx['update']['obj']: 'a%02d' % i
for i, ctx in enumerate(orig_iteration)}
expected_updates_sent = []
for index_sent in (0, 2, 4, 3):
expected_updates_sent.extend(
[contexts_fed_in[index_sent]['update']['obj']] * 3)
actual_updates_sent = [
utils.split_path(update[3], minsegs=3)[-1]
for update in captured_updates
]
self.assertEqual([aorder[o] for o in expected_updates_sent],
[aorder[o] for o in actual_updates_sent])
self.assertEqual([0, 0, 0, 0, 0, 1, 1], captured_skips_stats)
expected_deferrals = [
[],
[],
[objs_fed_in[1]],
[objs_fed_in[1]],
[objs_fed_in[1], objs_fed_in[3]],
[objs_fed_in[3], objs_fed_in[4]],
[objs_fed_in[3]], # note: rightmost element is drained
]
self.assertEqual(
expected_deferrals,
[[ctx['update']['obj'] for ctx in q] for q in captured_queues])
actual_sleeps = [call[0][0] for call in mock_sleep.call_args_list]
self.assertEqual(2, len(actual_sleeps))
self.assertAlmostEqual(0.1, actual_sleeps[0], 3)
self.assertAlmostEqual(0.09, actual_sleeps[1], 3)
info_lines = self.logger.get_lines_for_level('info')
self.assertTrue(info_lines)
self.assertIn('4 successes, 0 failures, 0 quarantines, 4 unlinks, '
'0 errors, 0 redirects, 1 skips, 3 deferrals, 2 drains',
info_lines[-1])
self.assertEqual(
{'skips': 1, 'successes': 4, 'unlinks': 4, 'deferrals': 3,
'drains': 2}, self.logger.statsd_client.get_increment_counts())
@mock.patch('swift.obj.updater.dump_recon_cache')
def test_per_container_rate_limit_unsent_deferrals(self, mock_recon):
# make some updates defer until interval is reached and cycle
# terminates
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'max_objects_per_container_per_second': 10,
# only one bucket needed for test
'per_container_ratelimit_buckets': 1,
'max_deferred_updates': 5,
'interval': 0.4,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
self.async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
os.mkdir(self.async_dir)
# all updates for same container
num_c1_files = 7
for i in range(num_c1_files):
obj_name = 'o%02d' % i
self._make_async_pending_pickle('a', 'c1', obj_name)
c1_part, _ = daemon.get_container_ring().get_nodes('a', 'c1')
expected_total = num_c1_files
self.assertEqual(expected_total,
len(self._find_async_pending_files()))
# first pass: 0, 2 and 5 succeed, 1, 3, 4, 6 deferred
# last 2 deferred items sent before interval elapses
latencies = [0, .05, 0.051, 0, 0, .11, 0, 0,
0.1, 0.1, 0] # total 0.411
expected_success = 5
contexts_fed_in = []
captured_queues = []
captured_skips_stats = []
def ratelimit_if(value):
contexts_fed_in.append(value)
return False # returning False overrides normal ratelimiting
orig_rate_limited_iterator = utils.RateLimitedIterator
def fake_rate_limited_iterator(*args, **kwargs):
# insert our own rate limiting function
kwargs['ratelimit_if'] = ratelimit_if
return orig_rate_limited_iterator(*args, **kwargs)
start = time()
now = [start]
def fake_get_time(bucket_iter):
if not captured_skips_stats:
daemon.begin = now[0]
captured_skips_stats.append(
daemon.logger.statsd_client.get_increment_counts().get(
'skips', 0))
captured_queues.append(list(bucket_iter.buckets[0].deque))
# insert delay each time iter is called
now[0] += latencies.pop(0)
return now[0]
captured_updates = []
def fake_object_update(node, part, op, obj, *args, **kwargs):
captured_updates.append((node, part, op, obj))
return True, node['id'], False
with mock.patch(
'swift.obj.updater.BucketizedUpdateSkippingLimiter._get_time',
fake_get_time), \
mock.patch.object(daemon, 'object_update',
fake_object_update), \
mock.patch('swift.obj.updater.RateLimitedIterator',
fake_rate_limited_iterator), \
mock.patch('swift.common.utils.eventlet.sleep') as mock_sleep:
daemon.run_once()
self.assertEqual(expected_success, daemon.stats.successes)
expected_skipped = expected_total - expected_success
self.assertEqual(expected_skipped, daemon.stats.skips)
self.assertEqual(expected_skipped,
len(self._find_async_pending_files()))
expected_updates_sent = []
for index_sent in (0, 2, 5, 6, 4):
expected_updates_sent.extend(
[contexts_fed_in[index_sent]['update']['obj']] * 3)
actual_updates_sent = [
utils.split_path(update[3], minsegs=3)[-1]
for update in captured_updates
]
self.assertEqual(expected_updates_sent, actual_updates_sent)
# skips (un-drained deferrals) not reported until end of cycle
self.assertEqual([0] * 10, captured_skips_stats)
objs_fed_in = [ctx['update']['obj'] for ctx in contexts_fed_in]
expected_deferrals = [
# queue content before app_iter feeds next update_ctx
[],
[],
[objs_fed_in[1]],
[objs_fed_in[1]],
[objs_fed_in[1], objs_fed_in[3]],
[objs_fed_in[1], objs_fed_in[3], objs_fed_in[4]],
[objs_fed_in[1], objs_fed_in[3], objs_fed_in[4]],
# queue content before each update_ctx is drained from queue...
# note: rightmost element is drained
[objs_fed_in[1], objs_fed_in[3], objs_fed_in[4], objs_fed_in[6]],
[objs_fed_in[1], objs_fed_in[3], objs_fed_in[4]],
[objs_fed_in[1], objs_fed_in[3]],
]
self.assertEqual(
expected_deferrals,
[[ctx['update']['obj'] for ctx in q] for q in captured_queues])
actual_sleeps = [call[0][0] for call in mock_sleep.call_args_list]
self.assertEqual(2, len(actual_sleeps))
self.assertAlmostEqual(0.1, actual_sleeps[0], 3)
self.assertAlmostEqual(0.1, actual_sleeps[1], 3)
info_lines = self.logger.get_lines_for_level('info')
self.assertTrue(info_lines)
self.assertIn('5 successes, 0 failures, 0 quarantines, 5 unlinks, '
'0 errors, 0 redirects, 2 skips, 4 deferrals, 2 drains',
info_lines[-1])
self.assertEqual(
{'successes': 5, 'unlinks': 5, 'deferrals': 4, 'drains': 2},
self.logger.statsd_client.get_increment_counts())
self.assertEqual(
2, self.logger.statsd_client.get_stats_counts()['skips'])
class TestObjectUpdaterFunctions(unittest.TestCase):
def test_split_update_path(self):
update = {
'op': 'PUT',
'account': 'a',
'container': 'c',
'obj': 'o',
'headers': {
'X-Container-Timestamp': normalize_timestamp(0),
}
}
actual = object_updater.split_update_path(update)
self.assertEqual(('a', 'c'), actual)
update['container_path'] = None
actual = object_updater.split_update_path(update)
self.assertEqual(('a', 'c'), actual)
update['container_path'] = '.shards_a/c_shard_n'
actual = object_updater.split_update_path(update)
self.assertEqual(('.shards_a', 'c_shard_n'), actual)
class TestBucketizedUpdateSkippingLimiter(unittest.TestCase):
def setUp(self):
self.logger = debug_logger()
self.stats = object_updater.SweepStats()
def test_init(self):
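# constructor args: the update iterable, logger, stats, the number of
# buckets, and the per-container rate limit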
it = object_updater.BucketizedUpdateSkippingLimiter(
[3, 1], self.logger, self.stats, 1000, 10)
self.assertEqual(1000, it.num_buckets)
self.assertEqual([10] * 1000, [b.max_rate for b in it.buckets])
self.assertEqual([3, 1], [x for x in it.iterator])
# rate of 0 implies unlimited
it = object_updater.BucketizedUpdateSkippingLimiter(
iter([3, 1]), self.logger, self.stats, 9, 0)
self.assertEqual(9, it.num_buckets)
self.assertEqual([0] * 9, [b.max_rate for b in it.buckets])
self.assertEqual([3, 1], [x for x in it.iterator])
# num_buckets is collared at 1
it = object_updater.BucketizedUpdateSkippingLimiter(
iter([3, 1]), self.logger, self.stats, 0, 1)
self.assertEqual(1, it.num_buckets)
self.assertEqual([1], [b.max_rate for b in it.buckets])
self.assertEqual([3, 1], [x for x in it.iterator])
def test_iteration_unlimited(self):
# verify iteration at unlimited rate
update_ctxs = [
{'update': {'account': '%d' % i, 'container': '%s' % i}}
for i in range(20)]
it = object_updater.BucketizedUpdateSkippingLimiter(
iter(update_ctxs), self.logger, self.stats, 9, 0)
self.assertEqual(update_ctxs, [x for x in it])
self.assertEqual(0, self.stats.skips)
self.assertEqual(0, self.stats.drains)
self.assertEqual(0, self.stats.deferrals)
def test_iteration_ratelimited(self):
# verify iteration at limited rate - single bucket
update_ctxs = [
{'update': {'account': '%d' % i, 'container': '%s' % i}}
for i in range(2)]
it = object_updater.BucketizedUpdateSkippingLimiter(
iter(update_ctxs), self.logger, self.stats, 1, 0.1)
# second update is skipped
self.assertEqual(update_ctxs[:1], [x for x in it])
self.assertEqual(1, self.stats.skips)
self.assertEqual(0, self.stats.drains)
self.assertEqual(1, self.stats.deferrals)
def test_deferral_single_bucket(self):
# verify deferral - single bucket
now = time()
update_ctxs = [
{'update': {'account': '%d' % i, 'container': '%s' % i}}
for i in range(4)]
# enough capacity for all deferrals
with mock.patch('swift.obj.updater.time.time',
side_effect=[now, now, now, now, now, now]):
with mock.patch('swift.common.utils.eventlet.sleep') as mock_sleep:
it = object_updater.BucketizedUpdateSkippingLimiter(
iter(update_ctxs[:3]), self.logger, self.stats, 1, 10,
max_deferred_elements=2,
drain_until=now + 10)
actual = [x for x in it]
self.assertEqual([update_ctxs[0],
update_ctxs[2], # deferrals...
update_ctxs[1]],
actual)
self.assertEqual(2, mock_sleep.call_count)
self.assertEqual(0, self.stats.skips)
self.assertEqual(2, self.stats.drains)
self.assertEqual(2, self.stats.deferrals)
self.stats.reset()
# only space for one deferral
with mock.patch('swift.obj.updater.time.time',
side_effect=[now, now, now, now, now]):
with mock.patch('swift.common.utils.eventlet.sleep') as mock_sleep:
it = object_updater.BucketizedUpdateSkippingLimiter(
iter(update_ctxs[:3]), self.logger, self.stats, 1, 10,
max_deferred_elements=1,
drain_until=now + 10)
actual = [x for x in it]
self.assertEqual([update_ctxs[0],
update_ctxs[2]], # deferrals...
actual)
self.assertEqual(1, mock_sleep.call_count)
self.assertEqual(1, self.stats.skips)
self.assertEqual(1, self.stats.drains)
self.assertEqual(2, self.stats.deferrals)
self.stats.reset()
# only time for one deferral
with mock.patch('swift.obj.updater.time.time',
side_effect=[now, now, now, now, now + 20, now + 20]):
with mock.patch('swift.common.utils.eventlet.sleep') as mock_sleep:
it = object_updater.BucketizedUpdateSkippingLimiter(
iter(update_ctxs[:3]), self.logger, self.stats, 1, 10,
max_deferred_elements=2,
drain_until=now + 10)
actual = [x for x in it]
self.assertEqual([update_ctxs[0],
update_ctxs[2]], # deferrals...
actual)
self.assertEqual(1, mock_sleep.call_count)
self.assertEqual(1, self.stats.skips)
self.assertEqual(1, self.stats.drains)
self.assertEqual(2, self.stats.deferrals)
self.stats.reset()
# only space for two deferrals, only time for one deferral
with mock.patch('swift.obj.updater.time.time',
side_effect=[now, now, now, now, now,
now + 20, now + 20]):
with mock.patch('swift.common.utils.eventlet.sleep') as mock_sleep:
it = object_updater.BucketizedUpdateSkippingLimiter(
iter(update_ctxs), self.logger, self.stats, 1, 10,
max_deferred_elements=2,
drain_until=now + 10)
actual = [x for x in it]
self.assertEqual([update_ctxs[0],
update_ctxs[3]], # deferrals...
actual)
self.assertEqual(1, mock_sleep.call_count)
self.assertEqual(2, self.stats.skips)
self.assertEqual(1, self.stats.drains)
self.assertEqual(3, self.stats.deferrals)
self.stats.reset()
def test_deferral_multiple_buckets(self):
# verify deferral - multiple buckets
update_ctxs_1 = [
{'update': {'account': 'a', 'container': 'c1', 'obj': '%3d' % i}}
for i in range(3)]
update_ctxs_2 = [
{'update': {'account': 'a', 'container': 'c2', 'obj': '%3d' % i}}
for i in range(3)]
time_iter = itertools.count(time(), 0.001)
# deferrals stick in both buckets
with mock.patch('swift.obj.updater.time.time',
side_effect=[next(time_iter) for _ in range(12)]):
with mock.patch('swift.common.utils.eventlet.sleep') as mock_sleep:
it = object_updater.BucketizedUpdateSkippingLimiter(
iter(update_ctxs_1 + update_ctxs_2),
self.logger, self.stats, 4, 10,
max_deferred_elements=4,
drain_until=next(time_iter))
it.salt = '' # make container->bucket hashing predictable
actual = [x for x in it]
self.assertEqual([update_ctxs_1[0],
update_ctxs_2[0],
update_ctxs_1[2], # deferrals...
update_ctxs_2[2],
update_ctxs_1[1],
update_ctxs_2[1],
],
actual)
self.assertEqual(4, mock_sleep.call_count)
self.assertEqual(0, self.stats.skips)
self.assertEqual(4, self.stats.drains)
self.assertEqual(4, self.stats.deferrals)
self.stats.reset()
# oldest deferral bumped from one bucket due to max_deferred_elements == 3
with mock.patch('swift.obj.updater.time.time',
side_effect=[next(time_iter) for _ in range(10)]):
with mock.patch('swift.common.utils.eventlet.sleep') as mock_sleep:
it = object_updater.BucketizedUpdateSkippingLimiter(
iter(update_ctxs_1 + update_ctxs_2),
self.logger, self.stats, 4, 10,
max_deferred_elements=3,
drain_until=next(time_iter))
it.salt = '' # make container->bucket hashing predictable
actual = [x for x in it]
self.assertEqual([update_ctxs_1[0],
update_ctxs_2[0],
update_ctxs_1[2], # deferrals...
update_ctxs_2[2],
update_ctxs_2[1],
],
actual)
self.assertEqual(3, mock_sleep.call_count)
self.assertEqual(1, self.stats.skips)
self.assertEqual(3, self.stats.drains)
self.assertEqual(4, self.stats.deferrals)
self.stats.reset()
# older deferrals bumped from one bucket due to max_deferred_elements == 2
with mock.patch('swift.obj.updater.time.time',
side_effect=[next(time_iter) for _ in range(10)]):
with mock.patch('swift.common.utils.eventlet.sleep') as mock_sleep:
it = object_updater.BucketizedUpdateSkippingLimiter(
iter(update_ctxs_1 + update_ctxs_2),
self.logger, self.stats, 4, 10,
max_deferred_elements=2,
drain_until=next(time_iter))
it.salt = '' # make container->bucket hashing predictable
actual = [x for x in it]
self.assertEqual([update_ctxs_1[0],
update_ctxs_2[0],
update_ctxs_2[2], # deferrals...
update_ctxs_2[1],
],
actual)
self.assertEqual(2, mock_sleep.call_count)
self.assertEqual(2, self.stats.skips)
self.assertEqual(2, self.stats.drains)
self.assertEqual(4, self.stats.deferrals)
self.stats.reset()
class TestRateLimiterBucket(unittest.TestCase):
def test_len(self):
b1 = object_updater.RateLimiterBucket(0.1)
b1.deque.append(1)
b1.deque.append(2)
self.assertEqual(2, len(b1))
b1.deque.pop()
self.assertEqual(1, len(b1))
def test_bool(self):
b1 = object_updater.RateLimiterBucket(0.1)
self.assertFalse(b1)
b1.deque.append(1)
self.assertTrue(b1)
b1.deque.pop()
self.assertFalse(b1)
def test_bucket_ordering(self):
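# buckets are ordered by running_time in the priority queue: the bucket
# that becomes ready soonest is returned first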
time_iter = itertools.count(time(), step=0.001)
b1 = object_updater.RateLimiterBucket(10)
b2 = object_updater.RateLimiterBucket(10)
b2.running_time = next(time_iter)
buckets = PriorityQueue()
buckets.put(b1)
buckets.put(b2)
self.assertEqual([b1, b2], [buckets.get_nowait() for _ in range(2)])
b1.running_time = next(time_iter)
buckets.put(b1)
buckets.put(b2)
self.assertEqual([b2, b1], [buckets.get_nowait() for _ in range(2)])
class TestSweepStats(unittest.TestCase):
def test_copy(self):
num_props = len(vars(object_updater.SweepStats()))
stats = object_updater.SweepStats(*range(1, num_props + 1))
stats2 = stats.copy()
self.assertEqual(vars(stats), vars(stats2))
def test_since(self):
stats = object_updater.SweepStats(1, 2, 3, 4, 5, 6, 7, 8, 9)
stats2 = object_updater.SweepStats(4, 6, 8, 10, 12, 14, 16, 18, 20)
expected = object_updater.SweepStats(3, 4, 5, 6, 7, 8, 9, 10, 11)
self.assertEqual(vars(expected), vars(stats2.since(stats)))
def test_reset(self):
num_props = len(vars(object_updater.SweepStats()))
stats = object_updater.SweepStats(*range(1, num_props + 1))
stats.reset()
expected = object_updater.SweepStats()
self.assertEqual(vars(expected), vars(stats))
def test_str(self):
num_props = len(vars(object_updater.SweepStats()))
stats = object_updater.SweepStats(*range(1, num_props + 1))
self.assertEqual(
'4 successes, 2 failures, 3 quarantines, 5 unlinks, 1 errors, '
'6 redirects, 7 skips, 8 deferrals, 9 drains', str(stats))
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/obj/test_updater.py |
| swift-master | test/unit/obj/__init__.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import json
import mock
import os
import pkg_resources
import signal
import string
import sys
import time
import xattr
from shutil import rmtree
from tempfile import mkdtemp
import textwrap
from os.path import dirname, basename
from test import BaseTestCase
from test.debug_logger import debug_logger
from test.unit import (
DEFAULT_TEST_EC_TYPE, make_timestamp_iter, patch_policies,
skip_if_no_xattrs)
from test.unit.obj.common import write_diskfile
from swift.obj import auditor, replicator
from swift.obj.watchers.dark_data import DarkDataWatcher
from swift.obj.diskfile import (
DiskFile, write_metadata, invalidate_hash, get_data_dir,
DiskFileManager, ECDiskFileManager, AuditLocation, clear_auditor_status,
get_auditor_status, HASH_FILE, HASH_INVALIDATIONS_FILE)
from swift.common.exceptions import ClientException
from swift.common.utils import (
mkdirs, normalize_timestamp, Timestamp, readconf, md5, PrefixLoggerAdapter)
from swift.common.storage_policy import (
ECStoragePolicy, StoragePolicy, POLICIES, EC_POLICY)
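# one non-default replication policy, one default replication policy,
# and a small 2+1 EC policy for the EC-specific audit tests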
_mocked_policies = [
StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', True),
ECStoragePolicy(2, 'two', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=2, ec_nparity=1, ec_segment_size=4096),
]
def works_only_once(callable_thing, exception):
called = [False]
def only_once(*a, **kw):
if called[0]:
raise exception
else:
called[0] = True
return callable_thing(*a, **kw)
return only_once
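# wraps a callable so the first call succeeds and every later call
# raises the given exception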
def no_audit_watchers(group, name=None):
if group == 'swift.object_audit_watcher':
return iter([])
else:
return pkg_resources.iter_entry_points(group, name)
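# substitute for pkg_resources.iter_entry_points that hides any installed
# object_audit_watcher plugins from the auditor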
class FakeRing1(object):
def __init__(self, swift_dir, ring_name=None):
return
def get_nodes(self, *args, **kwargs):
x = 1
node1 = {'ip': '10.0.0.%s' % x,
'replication_ip': '10.0.0.%s' % x,
'port': 6200 + x,
'replication_port': 6200 + x,
'device': 'sda',
'zone': x % 3,
'region': x % 2,
'id': x,
'handoff_index': 1}
return (1, [node1])
class FakeRing2(object):
def __init__(self, swift_dir, ring_name=None):
return
def get_nodes(self, *args, **kwargs):
nodes = []
for x in [1, 2]:
nodes.append({'ip': '10.0.0.%s' % x,
'replication_ip': '10.0.0.%s' % x,
'port': 6200 + x,
'replication_port': 6200 + x,
'device': 'sda',
'zone': x % 3,
'region': x % 2,
'id': x,
'handoff_index': 1})
return (1, nodes)
class TestAuditorBase(BaseTestCase):
def setUp(self):
skip_if_no_xattrs()
self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
self.devices = os.path.join(self.testdir, 'node')
self.rcache = os.path.join(self.testdir, 'object.recon')
self.logger = debug_logger()
rmtree(self.testdir, ignore_errors=1)
mkdirs(os.path.join(self.devices, 'sda'))
os.mkdir(os.path.join(self.devices, 'sdb'))
# policy 0
self.objects = os.path.join(self.devices, 'sda',
get_data_dir(POLICIES[0]))
self.objects_2 = os.path.join(self.devices, 'sdb',
get_data_dir(POLICIES[0]))
os.mkdir(self.objects)
# policy 1
self.objects_p1 = os.path.join(self.devices, 'sda',
get_data_dir(POLICIES[1]))
self.objects_2_p1 = os.path.join(self.devices, 'sdb',
get_data_dir(POLICIES[1]))
os.mkdir(self.objects_p1)
# policy 2
self.objects_p2 = os.path.join(self.devices, 'sda',
get_data_dir(POLICIES[2]))
self.objects_2_p2 = os.path.join(self.devices, 'sdb',
get_data_dir(POLICIES[2]))
os.mkdir(self.objects_p2)
self.parts = {}
self.parts_p1 = {}
self.parts_p2 = {}
for part in ['0', '1', '2', '3']:
self.parts[part] = os.path.join(self.objects, part)
self.parts_p1[part] = os.path.join(self.objects_p1, part)
self.parts_p2[part] = os.path.join(self.objects_p2, part)
os.mkdir(os.path.join(self.objects, part))
os.mkdir(os.path.join(self.objects_p1, part))
os.mkdir(os.path.join(self.objects_p2, part))
self.conf = dict(
devices=self.devices,
mount_check='false',
object_size_stats='10,100,1024,10240')
self.df_mgr = DiskFileManager(self.conf, self.logger)
self.ec_df_mgr = ECDiskFileManager(self.conf, self.logger)
# diskfiles for policy 0, 1, 2
self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o',
policy=POLICIES[0])
self.disk_file_p1 = self.df_mgr.get_diskfile('sda', '0', 'a', 'c2',
'o', policy=POLICIES[1])
self.disk_file_ec = self.ec_df_mgr.get_diskfile(
'sda', '0', 'a', 'c_ec', 'o', policy=POLICIES[2], frag_index=1)
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
@patch_policies(_mocked_policies)
class TestAuditor(TestAuditorBase):
def test_worker_conf_parms(self):
def check_common_defaults():
self.assertEqual(auditor_worker.max_bytes_per_second, 10000000)
self.assertEqual(auditor_worker.log_time, 3600)
# test default values
conf = dict(
devices=self.devices,
mount_check='false',
object_size_stats='10,100,1024,10240')
auditor_worker = auditor.AuditorWorker(conf, self.logger,
self.rcache, self.devices)
check_common_defaults()
for policy in POLICIES:
mgr = auditor_worker.diskfile_router[policy]
self.assertEqual(mgr.disk_chunk_size, 65536)
self.assertEqual(auditor_worker.max_files_per_second, 20)
self.assertEqual(auditor_worker.zero_byte_only_at_fps, 0)
# test specified audit value overrides
conf.update({'disk_chunk_size': 4096})
auditor_worker = auditor.AuditorWorker(conf, self.logger,
self.rcache, self.devices,
zero_byte_only_at_fps=50)
check_common_defaults()
for policy in POLICIES:
mgr = auditor_worker.diskfile_router[policy]
self.assertEqual(mgr.disk_chunk_size, 4096)
self.assertEqual(auditor_worker.max_files_per_second, 50)
self.assertEqual(auditor_worker.zero_byte_only_at_fps, 50)
def test_object_audit_extra_data(self):
def run_tests(disk_file):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
data = b'0' * 1024
if disk_file.policy.policy_type == EC_POLICY:
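# for the EC policy, write a single encoded fragment so the auditor's
# EC fragment metadata checks pass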
data = disk_file.policy.pyeclib_driver.encode(data)[0]
etag = md5(usedforsecurity=False)
with disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
timestamp = str(normalize_timestamp(time.time()))
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
if disk_file.policy.policy_type == EC_POLICY:
metadata.update({
'X-Object-Sysmeta-Ec-Frag-Index': '1',
'X-Object-Sysmeta-Ec-Etag': 'fake-etag',
})
writer.put(metadata)
writer.commit(Timestamp(timestamp))
pre_quarantines = auditor_worker.quarantines
auditor_worker.object_audit(
AuditLocation(disk_file._datadir, 'sda', '0',
policy=disk_file.policy))
self.assertEqual(auditor_worker.quarantines, pre_quarantines)
os.write(writer._fd, b'extra_data')
auditor_worker.object_audit(
AuditLocation(disk_file._datadir, 'sda', '0',
policy=disk_file.policy))
self.assertEqual(auditor_worker.quarantines,
pre_quarantines + 1)
run_tests(self.disk_file)
run_tests(self.disk_file_p1)
run_tests(self.disk_file_ec)
def test_object_audit_adds_metadata_checksums(self):
disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o-md',
policy=POLICIES.legacy)
# simulate a PUT
now = time.time()
data = b'boots and cats and ' * 1024
hasher = md5(usedforsecurity=False)
with disk_file.create() as writer:
writer.write(data)
hasher.update(data)
etag = hasher.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': str(normalize_timestamp(now)),
'Content-Length': len(data),
'Content-Type': 'the old type',
}
writer.put(metadata)
writer.commit(Timestamp(now))
# simulate a subsequent POST
post_metadata = metadata.copy()
post_metadata['Content-Type'] = 'the new type'
post_metadata['X-Object-Meta-Biff'] = 'buff'
post_metadata['X-Timestamp'] = str(normalize_timestamp(now + 1))
disk_file.write_metadata(post_metadata)
file_paths = [os.path.join(disk_file._datadir, fname)
for fname in os.listdir(disk_file._datadir)
if fname not in ('.', '..')]
file_paths.sort()
# sanity check: make sure we have a .data and a .meta file
self.assertEqual(len(file_paths), 2)
self.assertTrue(file_paths[0].endswith(".data"))
self.assertTrue(file_paths[1].endswith(".meta"))
# Go remove the xattr "user.swift.metadata_checksum" as if this
# object were written before Swift supported metadata checksums.
for file_path in file_paths:
xattr.removexattr(file_path, "user.swift.metadata_checksum")
# Run the auditor...
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
auditor_worker.object_audit(
AuditLocation(disk_file._datadir, 'sda', '0',
policy=disk_file.policy))
self.assertEqual(auditor_worker.quarantines, 0) # sanity
# ...and the checksums are back
for file_path in file_paths:
metadata = xattr.getxattr(file_path, "user.swift.metadata")
i = 1
while True:
try:
metadata += xattr.getxattr(
file_path, "user.swift.metadata%d" % i)
i += 1
except (IOError, OSError):
break
checksum = xattr.getxattr(
file_path, "user.swift.metadata_checksum")
self.assertEqual(
checksum,
(md5(metadata, usedforsecurity=False).hexdigest()
.encode('ascii')))
def test_object_audit_diff_data(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
data = b'0' * 1024
etag = md5(usedforsecurity=False)
timestamp = str(normalize_timestamp(time.time()))
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
writer.commit(Timestamp(timestamp))
pre_quarantines = auditor_worker.quarantines
# remake so it will have metadata
self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o',
policy=POLICIES.legacy)
auditor_worker.object_audit(
AuditLocation(self.disk_file._datadir, 'sda', '0',
policy=POLICIES.legacy))
self.assertEqual(auditor_worker.quarantines, pre_quarantines)
etag = md5(b'1' + b'0' * 1023, usedforsecurity=False).hexdigest()
metadata['ETag'] = etag
with self.disk_file.create() as writer:
writer.write(data)
writer.put(metadata)
writer.commit(Timestamp(timestamp))
auditor_worker.object_audit(
AuditLocation(self.disk_file._datadir, 'sda', '0',
policy=POLICIES.legacy))
self.assertEqual(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_checks_EC_fragments(self):
disk_file = self.disk_file_ec
def do_test(data):
# create diskfile and set ETag and content-length to match the data
etag = md5(data, usedforsecurity=False).hexdigest()
timestamp = str(normalize_timestamp(time.time()))
with disk_file.create() as writer:
writer.write(data)
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': len(data),
'X-Object-Sysmeta-Ec-Frag-Index': '1',
'X-Object-Sysmeta-Ec-Etag': 'fake-etag',
}
writer.put(metadata)
writer.commit(Timestamp(timestamp))
self.logger.clear()
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
self.assertEqual(0, auditor_worker.quarantines) # sanity check
auditor_worker.object_audit(
AuditLocation(disk_file._datadir, 'sda', '0',
policy=disk_file.policy))
return auditor_worker
# two good frags in an EC archive
frag_0 = disk_file.policy.pyeclib_driver.encode(
b'x' * disk_file.policy.ec_segment_size)[0]
frag_1 = disk_file.policy.pyeclib_driver.encode(
b'y' * disk_file.policy.ec_segment_size)[0]
data = frag_0 + frag_1
auditor_worker = do_test(data)
self.assertEqual(0, auditor_worker.quarantines)
self.assertFalse(auditor_worker.logger.get_lines_for_level('error'))
# corrupt second frag headers
corrupt_frag_1 = b'blah' * 16 + frag_1[64:]
data = frag_0 + corrupt_frag_1
auditor_worker = do_test(data)
self.assertEqual(1, auditor_worker.quarantines)
log_lines = auditor_worker.logger.get_lines_for_level('error')
self.assertIn('failed audit and was quarantined: '
'Invalid EC metadata at offset 0x%x' %
len(frag_0),
log_lines[0])
# dangling extra corrupt frag data
data = frag_0 + frag_1 + b'wtf' * 100
auditor_worker = do_test(data)
self.assertEqual(1, auditor_worker.quarantines)
log_lines = auditor_worker.logger.get_lines_for_level('error')
self.assertIn('failed audit and was quarantined: '
'Invalid EC metadata at offset 0x%x' %
len(frag_0 + frag_1),
log_lines[0])
# simulate bug https://bugs.launchpad.net/bugs/1631144 by writing start
# of an ssync subrequest into the diskfile
data = (
b'PUT /a/c/o\r\n' +
b'Content-Length: 999\r\n' +
b'Content-Type: image/jpeg\r\n' +
b'X-Object-Sysmeta-Ec-Content-Length: 1024\r\n' +
b'X-Object-Sysmeta-Ec-Etag: 1234bff7eb767cc6d19627c6b6f9edef\r\n' +
b'X-Object-Sysmeta-Ec-Frag-Index: 1\r\n' +
b'X-Object-Sysmeta-Ec-Scheme: ' +
DEFAULT_TEST_EC_TYPE.encode('ascii') + b'\r\n' +
b'X-Object-Sysmeta-Ec-Segment-Size: 1048576\r\n' +
b'X-Timestamp: 1471512345.17333\r\n\r\n'
)
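        # pad out to a full fragment with valid frag data so only the
        # leading ssync headers are invalid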
data += frag_0[:disk_file.policy.fragment_size - len(data)]
auditor_worker = do_test(data)
self.assertEqual(1, auditor_worker.quarantines)
log_lines = auditor_worker.logger.get_lines_for_level('error')
self.assertIn('failed audit and was quarantined: '
'Invalid EC metadata at offset 0x0',
log_lines[0])
def test_object_audit_no_meta(self):
timestamp = str(normalize_timestamp(time.time()))
path = os.path.join(self.disk_file._datadir, timestamp + '.data')
mkdirs(self.disk_file._datadir)
fp = open(path, 'wb')
fp.write(b'0' * 1024)
fp.close()
invalidate_hash(os.path.dirname(self.disk_file._datadir))
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
pre_quarantines = auditor_worker.quarantines
auditor_worker.object_audit(
AuditLocation(self.disk_file._datadir, 'sda', '0',
policy=POLICIES.legacy))
self.assertEqual(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_will_not_swallow_errors_in_tests(self):
timestamp = str(normalize_timestamp(time.time()))
path = os.path.join(self.disk_file._datadir, timestamp + '.data')
mkdirs(self.disk_file._datadir)
with open(path, 'w') as f:
write_metadata(f, {'name': '/a/c/o'})
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
def blowup(*args):
raise NameError('tpyo')
with mock.patch.object(DiskFileManager,
'get_diskfile_from_audit_location', blowup):
self.assertRaises(NameError, auditor_worker.object_audit,
AuditLocation(os.path.dirname(path), 'sda', '0',
policy=POLICIES.legacy))
def test_failsafe_object_audit_will_swallow_errors_in_tests(self):
timestamp = str(normalize_timestamp(time.time()))
path = os.path.join(self.disk_file._datadir, timestamp + '.data')
mkdirs(self.disk_file._datadir)
with open(path, 'w') as f:
write_metadata(f, {'name': '/a/c/o'})
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
def blowup(*args):
raise NameError('tpyo')
with mock.patch('swift.obj.diskfile.DiskFileManager.diskfile_cls',
blowup):
auditor_worker.failsafe_object_audit(
AuditLocation(os.path.dirname(path), 'sda', '0',
policy=POLICIES.legacy))
self.assertEqual(auditor_worker.errors, 1)
def test_audit_location_gets_quarantined(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
location = AuditLocation(self.disk_file._datadir, 'sda', '0',
policy=self.disk_file.policy)
# instead of a datadir, we'll make a file!
mkdirs(os.path.dirname(self.disk_file._datadir))
open(self.disk_file._datadir, 'w')
# after we turn the crank ...
auditor_worker.object_audit(location)
# ... it should get quarantined
self.assertFalse(os.path.exists(self.disk_file._datadir))
self.assertEqual(1, auditor_worker.quarantines)
def test_rsync_tempfile_timeout_auto_option(self):
# if we don't have access to the replicator config section we'll use
# our default
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
self.assertEqual(auditor_worker.rsync_tempfile_timeout, 86400)
# if the rsync_tempfile_timeout option is set explicitly we use that
self.conf['rsync_tempfile_timeout'] = '1800'
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
self.assertEqual(auditor_worker.rsync_tempfile_timeout, 1800)
# if we have a real config we can be a little smarter
config_path = os.path.join(self.testdir, 'objserver.conf')
stub_config = """
[object-auditor]
rsync_tempfile_timeout = auto
"""
with open(config_path, 'w') as f:
f.write(textwrap.dedent(stub_config))
# the Daemon loader will hand the object-auditor config to the
# auditor who will build the workers from it
conf = readconf(config_path, 'object-auditor')
auditor_worker = auditor.AuditorWorker(conf, self.logger,
self.rcache, self.devices)
# if there is no object-replicator section we still have to fall back
# to default because we can't parse the config for that section!
self.assertEqual(auditor_worker.rsync_tempfile_timeout, 86400)
stub_config = """
[object-replicator]
[object-auditor]
rsync_tempfile_timeout = auto
"""
with open(config_path, 'w') as f:
f.write(textwrap.dedent(stub_config))
conf = readconf(config_path, 'object-auditor')
auditor_worker = auditor.AuditorWorker(conf, self.logger,
self.rcache, self.devices)
        # if the object-replicator section parses but does not override
        # rsync_timeout, we assume the default rsync_timeout value and add
        # 15 minutes
self.assertEqual(auditor_worker.rsync_tempfile_timeout,
replicator.DEFAULT_RSYNC_TIMEOUT + 900)
stub_config = """
[DEFAULT]
reclaim_age = 1209600
[object-replicator]
rsync_timeout = 3600
[object-auditor]
rsync_tempfile_timeout = auto
"""
with open(config_path, 'w') as f:
f.write(textwrap.dedent(stub_config))
conf = readconf(config_path, 'object-auditor')
auditor_worker = auditor.AuditorWorker(conf, self.logger,
self.rcache, self.devices)
# if there is an object-replicator section with a rsync_timeout
# configured we'll use that value (3600) + 900
self.assertEqual(auditor_worker.rsync_tempfile_timeout, 3600 + 900)
def test_inprogress_rsync_tempfiles_get_cleaned_up(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
location = AuditLocation(self.disk_file._datadir, 'sda', '0',
policy=self.disk_file.policy)
data = b'VERIFY'
etag = md5(usedforsecurity=False)
timestamp = str(normalize_timestamp(time.time()))
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
metadata = {
'ETag': etag.hexdigest(),
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
writer.commit(Timestamp(timestamp))
datafilename = None
datadir_files = os.listdir(self.disk_file._datadir)
for filename in datadir_files:
if filename.endswith('.data'):
datafilename = filename
break
else:
self.fail('Did not find .data file in %r: %r' %
(self.disk_file._datadir, datadir_files))
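        # create a file that looks like an in-progress rsync temp file
        # (dot-prefixed name with a random suffix)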
rsynctempfile_path = os.path.join(self.disk_file._datadir,
'.%s.9ILVBL' % datafilename)
open(rsynctempfile_path, 'w')
# sanity check we have an extra file
rsync_files = os.listdir(self.disk_file._datadir)
self.assertEqual(len(datadir_files) + 1, len(rsync_files))
# and after we turn the crank ...
auditor_worker.object_audit(location)
# ... we've still got the rsync file
self.assertEqual(rsync_files, os.listdir(self.disk_file._datadir))
# and we'll keep it - depending on the rsync_tempfile_timeout
self.assertEqual(auditor_worker.rsync_tempfile_timeout, 86400)
self.conf['rsync_tempfile_timeout'] = '3600'
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
self.assertEqual(auditor_worker.rsync_tempfile_timeout, 3600)
now = time.time() + 1900
with mock.patch('swift.obj.auditor.time.time',
return_value=now):
auditor_worker.object_audit(location)
self.assertEqual(rsync_files, os.listdir(self.disk_file._datadir))
# but *tomorrow* when we run
tomorrow = time.time() + 86400
with mock.patch('swift.obj.auditor.time.time',
return_value=tomorrow):
auditor_worker.object_audit(location)
# ... we'll totally clean that stuff up!
self.assertEqual(datadir_files, os.listdir(self.disk_file._datadir))
# but if we have some random crazy file in there
random_crazy_file_path = os.path.join(self.disk_file._datadir,
'.random.crazy.file')
open(random_crazy_file_path, 'w')
tomorrow = time.time() + 86400
with mock.patch('swift.obj.auditor.time.time',
return_value=tomorrow):
auditor_worker.object_audit(location)
        # that's someone else's problem
self.assertIn(os.path.basename(random_crazy_file_path),
os.listdir(self.disk_file._datadir))
def test_generic_exception_handling(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
# pretend that we logged (and reset counters) just now
auditor_worker.last_logged = time.time()
timestamp = str(normalize_timestamp(time.time()))
pre_errors = auditor_worker.errors
data = b'0' * 1024
etag = md5(usedforsecurity=False)
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
writer.commit(Timestamp(timestamp))
with mock.patch('swift.obj.diskfile.DiskFileManager.diskfile_cls',
lambda *_: 1 / 0):
auditor_worker.audit_all_objects()
self.assertEqual(auditor_worker.errors, pre_errors + 1)
def test_object_run_once_pass(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
auditor_worker.log_time = 0
timestamp = str(normalize_timestamp(time.time()))
pre_quarantines = auditor_worker.quarantines
data = b'0' * 1024
def write_file(df):
with df.create() as writer:
writer.write(data)
metadata = {
'ETag': md5(data, usedforsecurity=False).hexdigest(),
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
writer.commit(Timestamp(timestamp))
# policy 0
write_file(self.disk_file)
# policy 1
write_file(self.disk_file_p1)
# policy 2
write_file(self.disk_file_ec)
auditor_worker.audit_all_objects()
self.assertEqual(auditor_worker.quarantines, pre_quarantines)
# 1 object per policy falls into 1024 bucket
self.assertEqual(auditor_worker.stats_buckets[1024], 3)
self.assertEqual(auditor_worker.stats_buckets[10240], 0)
# pick up some additional code coverage, large file
data = b'0' * 1024 * 1024
for df in (self.disk_file, self.disk_file_ec):
with df.create() as writer:
writer.write(data)
metadata = {
'ETag': md5(data, usedforsecurity=False).hexdigest(),
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
writer.commit(Timestamp(timestamp))
auditor_worker.audit_all_objects(device_dirs=['sda', 'sdb'])
self.assertEqual(auditor_worker.quarantines, pre_quarantines)
# still have the 1024 byte object left in policy-1 (plus the
# stats from the original 3)
self.assertEqual(auditor_worker.stats_buckets[1024], 4)
self.assertEqual(auditor_worker.stats_buckets[10240], 0)
        # and the policy-0 and EC disk_files were re-written as larger
        # objects, so both land in the OVER bucket
self.assertEqual(auditor_worker.stats_buckets['OVER'], 2)
# pick up even more additional code coverage, misc paths
auditor_worker.log_time = -1
auditor_worker.stats_sizes = []
auditor_worker.audit_all_objects(device_dirs=['sda', 'sdb'])
self.assertEqual(auditor_worker.quarantines, pre_quarantines)
self.assertEqual(auditor_worker.stats_buckets[1024], 4)
self.assertEqual(auditor_worker.stats_buckets[10240], 0)
self.assertEqual(auditor_worker.stats_buckets['OVER'], 2)
def test_object_run_logging(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
auditor_worker.audit_all_objects(device_dirs=['sda'])
log_lines = self.logger.get_lines_for_level('info')
self.assertGreater(len(log_lines), 0)
self.assertIn('ALL - parallel, sda', log_lines[0])
self.logger.clear()
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices,
zero_byte_only_at_fps=50)
auditor_worker.audit_all_objects(device_dirs=['sda'])
log_lines = self.logger.get_lines_for_level('info')
self.assertGreater(len(log_lines), 0)
self.assertIn('ZBF - sda', log_lines[0])
def test_object_run_recon_cache(self):
ts = Timestamp(time.time())
data = b'test_data'
with self.disk_file.create() as writer:
writer.write(data)
metadata = {
'ETag': md5(data, usedforsecurity=False).hexdigest(),
'X-Timestamp': ts.normal,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
writer.commit(ts)
# all devices
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
auditor_worker.audit_all_objects()
with open(self.rcache) as fd:
actual_rcache = json.load(fd)
expected = {'object_auditor_stats_ALL':
{'passes': 1, 'errors': 0, 'audit_time': mock.ANY,
'start_time': mock.ANY, 'quarantined': 0,
'bytes_processed': 9}}
with open(self.rcache) as fd:
actual_rcache = json.load(fd)
self.assertEqual(expected, actual_rcache)
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices,
zero_byte_only_at_fps=50)
auditor_worker.audit_all_objects()
self.assertEqual(expected, actual_rcache)
with open(self.rcache) as fd:
actual_rcache = json.load(fd)
expected.update({
'object_auditor_stats_ZBF':
{'passes': 1, 'errors': 0, 'audit_time': mock.ANY,
'start_time': mock.ANY, 'quarantined': 0,
'bytes_processed': 0}})
self.assertEqual(expected, actual_rcache)
# specific devices
os.unlink(self.rcache)
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
auditor_worker.audit_all_objects(device_dirs=['sda'])
with open(self.rcache) as fd:
actual_rcache = json.load(fd)
expected = {'object_auditor_stats_ALL':
{'sda': {'passes': 1, 'errors': 0, 'audit_time': mock.ANY,
'start_time': mock.ANY, 'quarantined': 0,
'bytes_processed': 9}}}
with open(self.rcache) as fd:
actual_rcache = json.load(fd)
self.assertEqual(expected, actual_rcache)
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices,
zero_byte_only_at_fps=50)
auditor_worker.audit_all_objects(device_dirs=['sda'])
self.assertEqual(expected, actual_rcache)
with open(self.rcache) as fd:
actual_rcache = json.load(fd)
expected.update({
'object_auditor_stats_ZBF':
{'sda': {'passes': 1, 'errors': 0, 'audit_time': mock.ANY,
'start_time': mock.ANY, 'quarantined': 0,
'bytes_processed': 0}}})
self.assertEqual(expected, actual_rcache)
def test_object_run_once_no_sda(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
timestamp = str(normalize_timestamp(time.time()))
pre_quarantines = auditor_worker.quarantines
# pretend that we logged (and reset counters) just now
auditor_worker.last_logged = time.time()
data = b'0' * 1024
etag = md5(usedforsecurity=False)
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
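            # append extra bytes after the put so the on-disk size no longer
            # matches the metadata Content-Length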
os.write(writer._fd, b'extra_data')
writer.commit(Timestamp(timestamp))
auditor_worker.audit_all_objects()
self.assertEqual(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_run_once_multi_devices(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
# pretend that we logged (and reset counters) just now
auditor_worker.last_logged = time.time()
timestamp = str(normalize_timestamp(time.time()))
pre_quarantines = auditor_worker.quarantines
data = b'0' * 10
etag = md5(usedforsecurity=False)
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
writer.commit(Timestamp(timestamp))
auditor_worker.audit_all_objects()
self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'ob',
policy=POLICIES.legacy)
data = b'1' * 10
etag = md5(usedforsecurity=False)
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
writer.commit(Timestamp(timestamp))
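            # append extra bytes so this second object fails its size check
            # and gets quarantined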
os.write(writer._fd, b'extra_data')
auditor_worker.audit_all_objects()
self.assertEqual(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_run_fast_track_non_zero(self):
self.auditor = auditor.ObjectAuditor(self.conf)
self.auditor.log_time = 0
data = b'0' * 1024
etag = md5(usedforsecurity=False)
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
timestamp = str(normalize_timestamp(time.time()))
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
writer.commit(Timestamp(timestamp))
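            # overwrite the metadata with an ETag that no longer matches the
            # data; the ZBF pass skips non-zero-byte files, but a full audit
            # should quarantine it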
etag = md5(usedforsecurity=False)
etag.update(b'1' + b'0' * 1023)
etag = etag.hexdigest()
metadata['ETag'] = etag
write_metadata(writer._fd, metadata)
quarantine_path = os.path.join(self.devices,
'sda', 'quarantined', 'objects')
kwargs = {'mode': 'once'}
kwargs['zero_byte_fps'] = 50
self.auditor.run_audit(**kwargs)
self.assertFalse(os.path.isdir(quarantine_path))
        del kwargs['zero_byte_fps']
clear_auditor_status(self.devices, 'objects')
self.auditor.run_audit(**kwargs)
self.assertTrue(os.path.isdir(quarantine_path))
def setup_bad_zero_byte(self, timestamp=None):
if timestamp is None:
timestamp = Timestamp.now()
self.auditor = auditor.ObjectAuditor(self.conf)
self.auditor.log_time = 0
etag = md5(usedforsecurity=False)
with self.disk_file.create() as writer:
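            # write no data; the zero-byte .data file will contradict the
            # Content-Length of 10 claimed in the metadata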
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp.internal,
'Content-Length': 10,
}
writer.put(metadata)
writer.commit(Timestamp(timestamp))
etag = md5(usedforsecurity=False)
etag = etag.hexdigest()
metadata['ETag'] = etag
write_metadata(writer._fd, metadata)
def test_object_run_fast_track_all(self):
self.setup_bad_zero_byte()
kwargs = {'mode': 'once'}
self.auditor.run_audit(**kwargs)
quarantine_path = os.path.join(self.devices,
'sda', 'quarantined', 'objects')
self.assertTrue(os.path.isdir(quarantine_path))
def test_object_run_fast_track_zero(self):
self.setup_bad_zero_byte()
kwargs = {'mode': 'once'}
kwargs['zero_byte_fps'] = 50
called_args = [0]
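        # capture the audit_type passed to get_auditor_status so we can
        # assert below that the ZBF scanner was the one that ran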
def mock_get_auditor_status(path, logger, audit_type):
called_args[0] = audit_type
return get_auditor_status(path, logger, audit_type)
with mock.patch('swift.obj.diskfile.get_auditor_status',
mock_get_auditor_status):
self.auditor.run_audit(**kwargs)
quarantine_path = os.path.join(self.devices,
'sda', 'quarantined', 'objects')
self.assertTrue(os.path.isdir(quarantine_path))
self.assertEqual('ZBF', called_args[0])
def test_object_run_fast_track_zero_check_closed(self):
rat = [False]
class FakeFile(DiskFile):
def _quarantine(self, data_file, msg):
rat[0] = True
DiskFile._quarantine(self, data_file, msg)
self.setup_bad_zero_byte()
with mock.patch('swift.obj.diskfile.DiskFileManager.diskfile_cls',
FakeFile):
kwargs = {'mode': 'once'}
kwargs['zero_byte_fps'] = 50
self.auditor.run_audit(**kwargs)
quarantine_path = os.path.join(self.devices,
'sda', 'quarantined', 'objects')
self.assertTrue(os.path.isdir(quarantine_path))
self.assertTrue(rat[0])
@mock.patch.object(auditor.ObjectAuditor, 'run_audit')
@mock.patch('os.fork', return_value=0)
def test_with_inaccessible_object_location(self, mock_os_fork,
mock_run_audit):
# Need to ensure that any failures in run_audit do
# not prevent sys.exit() from running. Otherwise we get
# zombie processes.
e = OSError('permission denied')
mock_run_audit.side_effect = e
self.auditor = auditor.ObjectAuditor(self.conf)
self.assertRaises(SystemExit, self.auditor.fork_child, self)
def test_with_only_tombstone(self):
# sanity check that auditor doesn't touch solitary tombstones
ts_iter = make_timestamp_iter()
self.setup_bad_zero_byte(timestamp=next(ts_iter))
self.disk_file.delete(next(ts_iter))
files = os.listdir(self.disk_file._datadir)
self.assertEqual(1, len(files))
self.assertTrue(files[0].endswith('ts'))
kwargs = {'mode': 'once'}
self.auditor.run_audit(**kwargs)
files_after = os.listdir(self.disk_file._datadir)
self.assertEqual(files, files_after)
def test_with_tombstone_and_data(self):
# rsync replication could leave a tombstone and data file in object
# dir - verify they are both removed during audit
ts_iter = make_timestamp_iter()
ts_tomb = next(ts_iter)
ts_data = next(ts_iter)
self.setup_bad_zero_byte(timestamp=ts_data)
tomb_file_path = os.path.join(self.disk_file._datadir,
'%s.ts' % ts_tomb.internal)
with open(tomb_file_path, 'wb') as fd:
write_metadata(fd, {'X-Timestamp': ts_tomb.internal})
files = os.listdir(self.disk_file._datadir)
self.assertEqual(2, len(files))
self.assertTrue(os.path.basename(tomb_file_path) in files, files)
kwargs = {'mode': 'once'}
self.auditor.run_audit(**kwargs)
self.assertFalse(os.path.exists(self.disk_file._datadir))
def _audit_tombstone(self, conf, ts_tomb, zero_byte_fps=0):
self.auditor = auditor.ObjectAuditor(conf)
self.auditor.log_time = 0
# create tombstone and hashes.pkl file, ensuring the tombstone is not
# reclaimed by mocking time to be the tombstone time
with mock.patch('time.time', return_value=float(ts_tomb)):
# this delete will create an invalid hashes entry
self.disk_file.delete(ts_tomb)
# this get_hashes call will truncate the invalid hashes entry
self.disk_file.manager.get_hashes(
'sda', '0', [], self.disk_file.policy)
suffix = basename(dirname(self.disk_file._datadir))
part_dir = dirname(dirname(self.disk_file._datadir))
# sanity checks...
self.assertEqual(['%s.ts' % ts_tomb.internal],
os.listdir(self.disk_file._datadir))
self.assertTrue(os.path.exists(os.path.join(part_dir, HASH_FILE)))
hash_invalid = os.path.join(part_dir, HASH_INVALIDATIONS_FILE)
self.assertTrue(os.path.exists(hash_invalid))
with open(hash_invalid, 'rb') as fp:
self.assertEqual(b'', fp.read().strip(b'\n'))
# Run auditor
self.auditor.run_audit(mode='once', zero_byte_fps=zero_byte_fps)
# sanity check - auditor should not remove tombstone file
self.assertEqual(['%s.ts' % ts_tomb.internal],
os.listdir(self.disk_file._datadir))
return part_dir, suffix
def test_non_reclaimable_tombstone(self):
# audit with a recent tombstone
ts_tomb = Timestamp(time.time() - 55)
part_dir, suffix = self._audit_tombstone(self.conf, ts_tomb)
self.assertTrue(os.path.exists(os.path.join(part_dir, HASH_FILE)))
hash_invalid = os.path.join(part_dir, HASH_INVALIDATIONS_FILE)
self.assertTrue(os.path.exists(hash_invalid))
with open(hash_invalid, 'rb') as fp:
self.assertEqual(b'', fp.read().strip(b'\n'))
def test_reclaimable_tombstone(self):
# audit with a reclaimable tombstone
ts_tomb = Timestamp(time.time() - 604800)
part_dir, suffix = self._audit_tombstone(self.conf, ts_tomb)
self.assertTrue(os.path.exists(os.path.join(part_dir, HASH_FILE)))
hash_invalid = os.path.join(part_dir, HASH_INVALIDATIONS_FILE)
self.assertTrue(os.path.exists(hash_invalid))
with open(hash_invalid, 'rb') as fp:
hash_val = fp.read()
self.assertEqual(suffix.encode('ascii'), hash_val.strip(b'\n'))
def test_non_reclaimable_tombstone_with_custom_reclaim_age(self):
# audit with a tombstone newer than custom reclaim age
ts_tomb = Timestamp(time.time() - 604800)
conf = dict(self.conf)
conf['reclaim_age'] = 2 * 604800
part_dir, suffix = self._audit_tombstone(conf, ts_tomb)
self.assertTrue(os.path.exists(os.path.join(part_dir, HASH_FILE)))
hash_invalid = os.path.join(part_dir, HASH_INVALIDATIONS_FILE)
self.assertTrue(os.path.exists(hash_invalid))
with open(hash_invalid, 'rb') as fp:
self.assertEqual(b'', fp.read().strip(b'\n'))
def test_reclaimable_tombstone_with_custom_reclaim_age(self):
# audit with a tombstone older than custom reclaim age
ts_tomb = Timestamp(time.time() - 55)
conf = dict(self.conf)
conf['reclaim_age'] = 10
part_dir, suffix = self._audit_tombstone(conf, ts_tomb)
self.assertTrue(os.path.exists(os.path.join(part_dir, HASH_FILE)))
hash_invalid = os.path.join(part_dir, HASH_INVALIDATIONS_FILE)
self.assertTrue(os.path.exists(hash_invalid))
with open(hash_invalid, 'rb') as fp:
hash_val = fp.read()
self.assertEqual(suffix.encode('ascii'), hash_val.strip(b'\n'))
def test_reclaimable_tombstone_with_zero_byte_fps(self):
# audit with a tombstone older than reclaim age by a zero_byte_fps
# worker does not invalidate the hash
ts_tomb = Timestamp(time.time() - 604800)
part_dir, suffix = self._audit_tombstone(
self.conf, ts_tomb, zero_byte_fps=50)
self.assertTrue(os.path.exists(os.path.join(part_dir, HASH_FILE)))
hash_invalid = os.path.join(part_dir, HASH_INVALIDATIONS_FILE)
self.assertTrue(os.path.exists(hash_invalid))
with open(hash_invalid, 'rb') as fp:
self.assertEqual(b'', fp.read().strip(b'\n'))
def _test_expired_object_is_ignored(self, zero_byte_fps):
# verify that an expired object does not get mistaken for a tombstone
audit = auditor.ObjectAuditor(self.conf, logger=self.logger)
audit.log_time = 0
now = time.time()
write_diskfile(self.disk_file, Timestamp(now - 20),
extra_metadata={'X-Delete-At': now - 10})
files = os.listdir(self.disk_file._datadir)
self.assertTrue([f for f in files if f.endswith('.data')]) # sanity
# diskfile write appends to invalid hashes file
part_dir = dirname(dirname(self.disk_file._datadir))
hash_invalid = os.path.join(part_dir, HASH_INVALIDATIONS_FILE)
with open(hash_invalid, 'rb') as fp:
self.assertEqual(
basename(dirname(self.disk_file._datadir)).encode('ascii'),
fp.read().strip(b'\n')) # sanity check
# run the auditor...
with mock.patch.object(auditor, 'dump_recon_cache'):
audit.run_audit(mode='once', zero_byte_fps=zero_byte_fps)
        # the auditor doesn't touch the invalidations file (i.e. it neither
        # truncates it nor adds an entry)
with open(hash_invalid, 'rb') as fp:
self.assertEqual(
basename(dirname(self.disk_file._datadir)).encode('ascii'),
fp.read().strip(b'\n')) # sanity check
# this get_hashes call will truncate the invalid hashes entry
self.disk_file.manager.get_hashes(
'sda', '0', [], self.disk_file.policy)
with open(hash_invalid, 'rb') as fp:
self.assertEqual(b'', fp.read().strip(b'\n')) # sanity check
# run the auditor, again...
with mock.patch.object(auditor, 'dump_recon_cache'):
audit.run_audit(mode='once', zero_byte_fps=zero_byte_fps)
# verify nothing changed
self.assertTrue(os.path.exists(self.disk_file._datadir))
self.assertEqual(files, os.listdir(self.disk_file._datadir))
self.assertFalse(audit.logger.get_lines_for_level('error'))
self.assertFalse(audit.logger.get_lines_for_level('warning'))
# and there was no hash invalidation
with open(hash_invalid, 'rb') as fp:
self.assertEqual(b'', fp.read().strip(b'\n'))
def test_expired_object_is_ignored(self):
self._test_expired_object_is_ignored(0)
def test_expired_object_is_ignored_with_zero_byte_fps(self):
self._test_expired_object_is_ignored(50)
def test_auditor_reclaim_age(self):
# if we don't have access to the replicator config section we'll use
# diskfile's default
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
router = auditor_worker.diskfile_router
for policy in POLICIES:
self.assertEqual(router[policy].reclaim_age, 86400 * 7)
# if the reclaim_age option is set explicitly we use that
self.conf['reclaim_age'] = '1800'
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
router = auditor_worker.diskfile_router
for policy in POLICIES:
self.assertEqual(router[policy].reclaim_age, 1800)
# if we have a real config we can be a little smarter
config_path = os.path.join(self.testdir, 'objserver.conf')
# if there is no object-replicator section we still have to fall back
# to default because we can't parse the config for that section!
stub_config = """
[object-auditor]
"""
with open(config_path, 'w') as f:
f.write(textwrap.dedent(stub_config))
conf = readconf(config_path, 'object-auditor')
auditor_worker = auditor.AuditorWorker(conf, self.logger,
self.rcache, self.devices)
router = auditor_worker.diskfile_router
for policy in POLICIES:
self.assertEqual(router[policy].reclaim_age, 86400 * 7)
        # verify reclaim_age is taken from the auditor config value
stub_config = """
[object-replicator]
[object-auditor]
reclaim_age = 60
"""
with open(config_path, 'w') as f:
f.write(textwrap.dedent(stub_config))
conf = readconf(config_path, 'object-auditor')
auditor_worker = auditor.AuditorWorker(conf, self.logger,
self.rcache, self.devices)
router = auditor_worker.diskfile_router
for policy in POLICIES:
self.assertEqual(router[policy].reclaim_age, 60)
# verify reclaim_age falls back to replicator config value
# if there is no auditor config value
config_path = os.path.join(self.testdir, 'objserver.conf')
stub_config = """
[object-replicator]
reclaim_age = 60
[object-auditor]
"""
with open(config_path, 'w') as f:
f.write(textwrap.dedent(stub_config))
conf = readconf(config_path, 'object-auditor')
auditor_worker = auditor.AuditorWorker(conf, self.logger,
self.rcache, self.devices)
router = auditor_worker.diskfile_router
for policy in POLICIES:
self.assertEqual(router[policy].reclaim_age, 60)
# we'll prefer our own DEFAULT section to the replicator though
self.assertEqual(auditor_worker.rsync_tempfile_timeout,
replicator.DEFAULT_RSYNC_TIMEOUT + 900)
stub_config = """
[DEFAULT]
reclaim_age = 1209600
[object-replicator]
reclaim_age = 1800
[object-auditor]
"""
with open(config_path, 'w') as f:
f.write(textwrap.dedent(stub_config))
conf = readconf(config_path, 'object-auditor')
auditor_worker = auditor.AuditorWorker(conf, self.logger,
self.rcache, self.devices)
router = auditor_worker.diskfile_router
for policy in POLICIES:
self.assertEqual(router[policy].reclaim_age, 1209600)
def test_sleeper(self):
with mock.patch(
'time.sleep', mock.MagicMock()) as mock_sleep:
my_auditor = auditor.ObjectAuditor(self.conf)
my_auditor._sleep()
mock_sleep.assert_called_with(30)
my_conf = dict(interval=2)
my_conf.update(self.conf)
my_auditor = auditor.ObjectAuditor(my_conf)
my_auditor._sleep()
mock_sleep.assert_called_with(2)
my_auditor = auditor.ObjectAuditor(self.conf)
my_auditor.interval = 2
my_auditor._sleep()
mock_sleep.assert_called_with(2)
def test_run_parallel_audit(self):
class StopForever(Exception):
pass
class Bogus(Exception):
pass
loop_error = Bogus('exception')
class LetMeOut(BaseException):
pass
class ObjectAuditorMock(object):
check_args = ()
check_kwargs = {}
check_device_dir = None
fork_called = 0
master = 0
wait_called = 0
def mock_run(self, *args, **kwargs):
self.check_args = args
self.check_kwargs = kwargs
if 'zero_byte_fps' in kwargs:
self.check_device_dir = kwargs.get('device_dirs')
def mock_sleep_stop(self):
raise StopForever('stop')
def mock_sleep_continue(self):
return
def mock_audit_loop_error(self, parent, zbo_fps,
override_devices=None, **kwargs):
raise loop_error
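            # emulate os.fork(): return a fake child pid when acting as the
            # parent (master set), or 0 when acting as the child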
def mock_fork(self):
self.fork_called += 1
if self.master:
return self.fork_called
else:
return 0
def mock_wait(self):
self.wait_called += 1
return (self.wait_called, 0)
def mock_signal(self, sig, action):
pass
def mock_exit(self):
pass
for i in string.ascii_letters[2:26]:
mkdirs(os.path.join(self.devices, 'sd%s' % i))
my_auditor = auditor.ObjectAuditor(dict(devices=self.devices,
mount_check='false',
zero_byte_files_per_second=89,
concurrency=1))
mocker = ObjectAuditorMock()
my_auditor.logger.exception = mock.MagicMock()
real_audit_loop = my_auditor.audit_loop
my_auditor.audit_loop = mocker.mock_audit_loop_error
my_auditor.run_audit = mocker.mock_run
was_fork = os.fork
was_wait = os.wait
was_signal = signal.signal
was_exit = sys.exit
os.fork = mocker.mock_fork
os.wait = mocker.mock_wait
signal.signal = mocker.mock_signal
sys.exit = mocker.mock_exit
try:
my_auditor._sleep = mocker.mock_sleep_stop
my_auditor.run_once(zero_byte_fps=50)
my_auditor.logger.exception.assert_called_once_with(
'ERROR auditing: %s', loop_error)
my_auditor.logger.exception.reset_mock()
self.assertRaises(StopForever, my_auditor.run_forever)
my_auditor.logger.exception.assert_called_once_with(
'ERROR auditing: %s', loop_error)
my_auditor.audit_loop = real_audit_loop
# sleep between ZBF scanner forks
self.assertRaises(StopForever, my_auditor.fork_child, True, True)
mocker.fork_called = 0
signal.signal = was_signal
sys.exit = was_exit
self.assertRaises(StopForever,
my_auditor.run_forever, zero_byte_fps=50)
self.assertEqual(mocker.check_kwargs['zero_byte_fps'], 50)
self.assertEqual(mocker.fork_called, 0)
self.assertRaises(SystemExit, my_auditor.run_once)
self.assertEqual(mocker.fork_called, 1)
self.assertEqual(mocker.check_kwargs['zero_byte_fps'], 89)
self.assertEqual(mocker.check_device_dir, [])
self.assertEqual(mocker.check_args, ())
device_list = ['sd%s' % i for i in string.ascii_letters[2:10]]
device_string = ','.join(device_list)
device_string_bogus = device_string + ',bogus'
mocker.fork_called = 0
self.assertRaises(SystemExit, my_auditor.run_once,
devices=device_string_bogus)
self.assertEqual(mocker.fork_called, 1)
self.assertEqual(mocker.check_kwargs['zero_byte_fps'], 89)
self.assertEqual(sorted(mocker.check_device_dir), device_list)
mocker.master = 1
mocker.fork_called = 0
self.assertRaises(StopForever, my_auditor.run_forever)
            # Fork and wait are each called at least 2 times in the main
            # process: 2 times if the ZBF pass runs once and 3 times if it
            # runs again
self.assertGreaterEqual(mocker.fork_called, 2)
self.assertGreaterEqual(mocker.wait_called, 2)
my_auditor._sleep = mocker.mock_sleep_continue
my_auditor.audit_loop = works_only_once(my_auditor.audit_loop,
LetMeOut())
my_auditor.concurrency = 2
mocker.fork_called = 0
mocker.wait_called = 0
self.assertRaises(LetMeOut, my_auditor.run_forever)
            # Fork and wait are each called at least
            # (no. of devices) + (no. of devices)/2 + 1 times in the main
            # process
no_devices = len(os.listdir(self.devices))
self.assertGreaterEqual(mocker.fork_called, no_devices +
no_devices / 2 + 1)
self.assertGreaterEqual(mocker.wait_called, no_devices +
no_devices / 2 + 1)
finally:
os.fork = was_fork
os.wait = was_wait
def test_run_audit_once(self):
my_auditor = auditor.ObjectAuditor(dict(devices=self.devices,
mount_check='false',
zero_byte_files_per_second=89,
concurrency=1))
forked_pids = []
next_zbf_pid = [2]
next_normal_pid = [1001]
outstanding_pids = [[]]
def fake_fork_child(**kwargs):
if len(forked_pids) > 10:
# something's gone horribly wrong
raise BaseException("forking too much")
# ZBF pids are all smaller than the normal-audit pids; this way
# we can return them first.
#
# Also, ZBF pids are even and normal-audit pids are odd; this is
# so humans seeing this test fail can better tell what's happening.
if kwargs.get('zero_byte_fps'):
pid = next_zbf_pid[0]
next_zbf_pid[0] += 2
else:
pid = next_normal_pid[0]
next_normal_pid[0] += 2
outstanding_pids[0].append(pid)
forked_pids.append(pid)
return pid
def fake_os_wait():
# Smallest pid first; that's ZBF if we have one, else normal
outstanding_pids[0].sort()
pid = outstanding_pids[0].pop(0)
return (pid, 0) # (pid, status)
with mock.patch("swift.obj.auditor.os.wait", fake_os_wait), \
mock.patch.object(my_auditor, 'fork_child', fake_fork_child), \
mock.patch.object(my_auditor, '_sleep', lambda *a: None):
my_auditor.run_once()
self.assertEqual(sorted(forked_pids), [2, 1001])
def test_run_audit_once_zbfps(self):
my_auditor = auditor.ObjectAuditor(dict(devices=self.devices,
mount_check='false',
zero_byte_files_per_second=89,
concurrency=1,
recon_cache_path=self.testdir))
with mock.patch.object(my_auditor, '_sleep', lambda *a: None):
my_auditor.run_once(zero_byte_fps=50)
with open(self.rcache) as fd:
# there's no objects to audit so expect no stats; this assertion
# may change if https://bugs.launchpad.net/swift/+bug/1704858 is
# fixed
self.assertEqual({}, json.load(fd))
# check recon cache stays clean after a second run
with mock.patch.object(my_auditor, '_sleep', lambda *a: None):
my_auditor.run_once(zero_byte_fps=50)
with open(self.rcache) as fd:
self.assertEqual({}, json.load(fd))
ts = Timestamp(time.time())
with self.disk_file.create() as writer:
metadata = {
'ETag': md5(b'', usedforsecurity=False).hexdigest(),
'X-Timestamp': ts.normal,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
writer.commit(ts)
# check recon cache stays clean after a second run
with mock.patch.object(my_auditor, '_sleep', lambda *a: None):
my_auditor.run_once(zero_byte_fps=50)
with open(self.rcache) as fd:
self.assertEqual({
'object_auditor_stats_ZBF': {
'audit_time': 0,
'bytes_processed': 0,
'errors': 0,
'passes': 1,
'quarantined': 0,
'start_time': mock.ANY}},
json.load(fd))
def test_run_parallel_audit_once(self):
my_auditor = auditor.ObjectAuditor(
dict(devices=self.devices, mount_check='false',
zero_byte_files_per_second=89, concurrency=2))
# ZBF pids are smaller than the normal-audit pids; this way we can
# return them first from our mocked os.wait().
#
# Also, ZBF pids are even and normal-audit pids are odd; this is so
# humans seeing this test fail can better tell what's happening.
forked_pids = []
next_zbf_pid = [2]
next_normal_pid = [1001]
outstanding_pids = [[]]
def fake_fork_child(**kwargs):
if len(forked_pids) > 10:
# something's gone horribly wrong; try not to hang the test
# run because of it
raise BaseException("forking too much")
if kwargs.get('zero_byte_fps'):
pid = next_zbf_pid[0]
next_zbf_pid[0] += 2
else:
pid = next_normal_pid[0]
next_normal_pid[0] += 2
outstanding_pids[0].append(pid)
forked_pids.append(pid)
return pid
def fake_os_wait():
if not outstanding_pids[0]:
raise BaseException("nobody waiting")
# ZBF auditor finishes first
outstanding_pids[0].sort()
pid = outstanding_pids[0].pop(0)
return (pid, 0) # (pid, status)
# make sure we've got enough devs that the ZBF auditor can finish
# before all the normal auditors have been started
mkdirs(os.path.join(self.devices, 'sdc'))
mkdirs(os.path.join(self.devices, 'sdd'))
with mock.patch("swift.obj.auditor.os.wait", fake_os_wait), \
mock.patch.object(my_auditor, 'fork_child', fake_fork_child), \
mock.patch.object(my_auditor, '_sleep', lambda *a: None):
my_auditor.run_once()
self.assertEqual(sorted(forked_pids), [2, 1001, 1003, 1005, 1007])
def test_run_parallel_audit_once_failed_fork(self):
my_auditor = auditor.ObjectAuditor(
dict(devices=self.devices, mount_check='false',
concurrency=2))
start_pid = [1001]
outstanding_pids = []
failed_once = [False]
def failing_fork(**kwargs):
            # this fork fails only on the 2nd call, which is enough to leave
            # orphaned child processes behind if they are never reaped
if len(outstanding_pids) > 0 and not failed_once[0]:
failed_once[0] = True
raise OSError
start_pid[0] += 2
pid = start_pid[0]
outstanding_pids.append(pid)
return pid
def fake_wait():
return outstanding_pids.pop(0), 0
with mock.patch("swift.obj.auditor.os.wait", fake_wait), \
mock.patch.object(my_auditor, 'fork_child', failing_fork), \
mock.patch.object(my_auditor, '_sleep', lambda *a: None):
for i in range(3):
my_auditor.run_once()
self.assertEqual(len(outstanding_pids), 0,
"orphaned children left {0}, expected 0."
.format(outstanding_pids))
@mock.patch('pkg_resources.iter_entry_points', no_audit_watchers)
@patch_policies(_mocked_policies)
class TestAuditWatchers(TestAuditorBase):
def setUp(self):
super(TestAuditWatchers, self).setUp()
timestamp = Timestamp(time.time())
disk_file = self.df_mgr.get_diskfile(
'sda', '0', 'a', 'c', 'o0', policy=POLICIES.legacy)
data = b'0' * 1024
etag = md5()
with disk_file.create() as writer:
writer.write(data)
etag.update(data)
metadata = {
'ETag': etag.hexdigest(),
'X-Timestamp': timestamp.internal,
'Content-Length': str(len(data)),
'X-Object-Meta-Flavor': 'banana',
}
writer.put(metadata)
        # The commit does nothing for a replicated policy; we keep it so
        # this block mirrors the EC case below.
writer.commit(timestamp)
disk_file = self.df_mgr.get_diskfile(
'sda', '0', 'a', 'c', 'o1', policy=POLICIES.legacy)
data = b'1' * 2048
etag = md5()
with disk_file.create() as writer:
writer.write(data)
etag.update(data)
metadata = {
'ETag': etag.hexdigest(),
'X-Timestamp': timestamp.internal,
'Content-Length': str(len(data)),
'X-Object-Meta-Flavor': 'orange',
}
writer.put(metadata)
writer.commit(timestamp)
frag_0 = self.disk_file_ec.policy.pyeclib_driver.encode(
b'x' * self.disk_file_ec.policy.ec_segment_size)[0]
etag = md5()
with self.disk_file_ec.create() as writer:
writer.write(frag_0)
etag.update(frag_0)
metadata = {
'ETag': etag.hexdigest(),
'X-Timestamp': timestamp.internal,
'Content-Length': str(len(frag_0)),
'X-Object-Meta-Flavor': 'peach',
'X-Object-Sysmeta-Ec-Frag-Index': '1',
'X-Object-Sysmeta-Ec-Etag': 'fake-etag',
}
writer.put(metadata)
writer.commit(timestamp)
def test_watchers(self):
calls = []
class TestWatcher(object):
def __init__(self, conf, logger):
self._started = False
self._ended = False
calls.append(["__init__", conf, logger])
# Make sure the logger is capable of quacking like a logger
logger.debug("getting started")
def start(self, audit_type, **other_kwargs):
if self._started:
raise Exception("don't call it twice")
self._started = True
calls.append(['start', audit_type])
def see_object(self, object_metadata,
data_file_path, **other_kwargs):
calls.append(['see_object', object_metadata,
data_file_path, other_kwargs])
def end(self, **other_kwargs):
if self._ended:
raise Exception("don't call it twice")
self._ended = True
calls.append(['end'])
conf = self.conf.copy()
conf['watchers'] = 'test_watcher1'
conf['__file__'] = '/etc/swift/swift.conf'
ret_config = {'swift#dark_data': {'action': 'log'}}
with mock.patch('swift.obj.auditor.parse_prefixed_conf',
return_value=ret_config), \
mock.patch('swift.obj.auditor.load_pkg_resource',
side_effect=[TestWatcher]) as mock_load, \
mock.patch('swift.obj.auditor.get_logger',
lambda *a, **kw: self.logger):
my_auditor = auditor.ObjectAuditor(conf)
self.assertEqual(mock_load.mock_calls, [
mock.call('swift.object_audit_watcher', 'test_watcher1'),
])
my_auditor.run_audit(mode='once', zero_byte_fps=float("inf"))
self.assertEqual(len(calls), 6)
self.assertEqual(calls[0], ["__init__", conf, mock.ANY])
self.assertIsInstance(calls[0][2], PrefixLoggerAdapter)
self.assertIs(calls[0][2].logger, self.logger)
self.assertEqual(calls[1], ["start", "ZBF"])
self.assertEqual(calls[2][0], "see_object")
self.assertEqual(calls[3][0], "see_object")
# The order in which the auditor finds things on the filesystem is
# irrelevant; what matters is that it finds all the things.
calls[2:5] = sorted(calls[2:5], key=lambda item: item[1]['name'])
self._assertDictContainsSubset({'name': '/a/c/o0',
'X-Object-Meta-Flavor': 'banana'},
calls[2][1])
self.assertIn('node/sda/objects/0/', calls[2][2]) # data_file_path
self.assertTrue(calls[2][2].endswith('.data')) # data_file_path
self.assertEqual({}, calls[2][3])
self._assertDictContainsSubset({'name': '/a/c/o1',
'X-Object-Meta-Flavor': 'orange'},
calls[3][1])
self.assertIn('node/sda/objects/0/', calls[3][2]) # data_file_path
self.assertTrue(calls[3][2].endswith('.data')) # data_file_path
self.assertEqual({}, calls[3][3])
self._assertDictContainsSubset({'name': '/a/c_ec/o',
'X-Object-Meta-Flavor': 'peach'},
calls[4][1])
self.assertIn('node/sda/objects-2/0/', calls[4][2]) # data_file_path
self.assertTrue(calls[4][2].endswith('.data')) # data_file_path
self.assertEqual({}, calls[4][3])
self.assertEqual(calls[5], ["end"])
log_lines = self.logger.get_lines_for_level('debug')
self.assertIn(
"[audit-watcher test_watcher1] getting started",
log_lines)
def test_builtin_watchers(self):
# Yep, back-channel signaling in tests.
sentinel = 'DARK'
timestamp = Timestamp(time.time())
disk_file = self.df_mgr.get_diskfile(
'sda', '0', 'a', sentinel, 'o2', policy=POLICIES.legacy)
data = b'2' * 1024
etag = md5()
with disk_file.create() as writer:
writer.write(data)
etag.update(data)
metadata = {
'ETag': etag.hexdigest(),
'X-Timestamp': timestamp.internal,
'Content-Length': str(len(data)),
'X-Object-Meta-Flavor': 'mango',
}
writer.put(metadata)
writer.commit(timestamp)
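        # emulate direct_get_container: the sentinel container returns an
        # empty listing (making its object look dark), every other container
        # lists the object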
def fake_direct_get_container(node, part, account, container,
prefix=None, limit=None):
self.assertEqual(part, 1)
self.assertEqual(limit, 1)
if container == sentinel:
return {}, []
            # The returned entry is not abbreviated, but is full of nonsense.
entry = {'bytes': 30968411,
'hash': '60303f4122966fe5925f045eb52d1129',
'name': '%s' % prefix,
'content_type': 'video/mp4',
'last_modified': '2017-08-15T03:30:57.693210'}
return {}, [entry]
conf = self.conf.copy()
conf['watchers'] = 'test_watcher1'
conf['__file__'] = '/etc/swift/swift.conf'
        # with the default watcher config the DARK object will not be older
        # than grace_age, so it will not be logged
ret_config = {'test_watcher1': {'action': 'log'}}
with mock.patch('swift.obj.auditor.parse_prefixed_conf',
return_value=ret_config), \
mock.patch('swift.obj.auditor.load_pkg_resource',
side_effect=[DarkDataWatcher]):
my_auditor = auditor.ObjectAuditor(conf, logger=self.logger)
with mock.patch('swift.obj.watchers.dark_data.Ring', FakeRing1), \
mock.patch("swift.obj.watchers.dark_data.direct_get_container",
fake_direct_get_container):
my_auditor.run_audit(mode='once')
log_lines = self.logger.get_lines_for_level('info')
self.assertIn(
'[audit-watcher test_watcher1] total unknown 0 ok 4 dark 0',
log_lines)
self.logger.clear()
        # with grace_age=0 the DARK object will be older than grace_age, so
        # it will be logged
ret_config = {'test_watcher1': {'action': 'log', 'grace_age': '0'}}
with mock.patch('swift.obj.auditor.parse_prefixed_conf',
return_value=ret_config), \
mock.patch('swift.obj.auditor.load_pkg_resource',
side_effect=[DarkDataWatcher]):
my_auditor = auditor.ObjectAuditor(conf, logger=self.logger)
with mock.patch('swift.obj.watchers.dark_data.Ring', FakeRing1), \
mock.patch("swift.obj.watchers.dark_data.direct_get_container",
fake_direct_get_container):
my_auditor.run_audit(mode='once')
log_lines = self.logger.get_lines_for_level('info')
self.assertIn(
'[audit-watcher test_watcher1] total unknown 0 ok 3 dark 1',
log_lines)
def test_dark_data_watcher_init(self):
conf = {}
with mock.patch('swift.obj.watchers.dark_data.Ring', FakeRing1):
watcher = DarkDataWatcher(conf, self.logger)
self.assertEqual(self.logger, watcher.logger)
self.assertEqual(604800, watcher.grace_age)
self.assertEqual('log', watcher.dark_data_policy)
conf = {'grace_age': 360, 'action': 'delete'}
with mock.patch('swift.obj.watchers.dark_data.Ring', FakeRing1):
watcher = DarkDataWatcher(conf, self.logger)
self.assertEqual(self.logger, watcher.logger)
self.assertEqual(360, watcher.grace_age)
self.assertEqual('delete', watcher.dark_data_policy)
conf = {'grace_age': 0, 'action': 'invalid'}
with mock.patch('swift.obj.watchers.dark_data.Ring', FakeRing1):
watcher = DarkDataWatcher(conf, self.logger)
self.assertEqual(self.logger, watcher.logger)
self.assertEqual(0, watcher.grace_age)
self.assertEqual('log', watcher.dark_data_policy)
def test_dark_data_agreement(self):
# The dark data watcher only sees an object as dark if all container
# servers in the ring reply without an error and return an empty
# listing. So, we have the following permutations for an object:
#
# Container Servers Result
# CS1 CS2
# Listed Listed Good - the baseline result
# Listed Error Good
# Listed Not listed Good
# Error Error Unknown - the baseline failure
# Not listed Error Unknown
# Not listed Not listed Dark - the only such result!
#
scenario = [
{'cr': ['L', 'L'], 'res': 'G'},
{'cr': ['L', 'E'], 'res': 'G'},
{'cr': ['L', 'N'], 'res': 'G'},
{'cr': ['E', 'E'], 'res': 'U'},
{'cr': ['N', 'E'], 'res': 'U'},
{'cr': ['N', 'N'], 'res': 'D'}]
conf = self.conf.copy()
conf['watchers'] = 'test_watcher1'
conf['__file__'] = '/etc/swift/swift.conf'
ret_config = {'test_watcher1': {'action': 'log', 'grace_age': '0'}}
with mock.patch('swift.obj.auditor.parse_prefixed_conf',
return_value=ret_config), \
mock.patch('swift.obj.auditor.load_pkg_resource',
side_effect=[DarkDataWatcher]):
my_auditor = auditor.ObjectAuditor(conf, logger=self.logger)
for cur in scenario:
def fake_direct_get_container(node, part, account, container,
prefix=None, limit=None):
self.assertEqual(part, 1)
self.assertEqual(limit, 1)
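                # node ids are 1-based; look up this node's scripted reply
                # in the current scenario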
reply_type = cur['cr'][int(node['id']) - 1]
if reply_type == 'E':
raise ClientException("Emulated container server error")
if reply_type == 'N':
return {}, []
entry = {'bytes': 30968411,
'hash': '60303f4122966fe5925f045eb52d1129',
'name': '%s' % prefix,
'content_type': 'video/mp4',
'last_modified': '2017-08-15T03:30:57.693210'}
return {}, [entry]
self.logger.clear()
namespace = 'swift.obj.watchers.dark_data.'
with mock.patch(namespace + 'Ring', FakeRing2), \
mock.patch(namespace + 'direct_get_container',
fake_direct_get_container):
my_auditor.run_audit(mode='once')
# We inherit a common setUp with 3 objects, so 3 everywhere.
if cur['res'] == 'U':
unk_exp, ok_exp, dark_exp = 3, 0, 0
elif cur['res'] == 'G':
unk_exp, ok_exp, dark_exp = 0, 3, 0
else:
unk_exp, ok_exp, dark_exp = 0, 0, 3
log_lines = self.logger.get_lines_for_level('info')
for line in log_lines:
if not line.startswith('[audit-watcher test_watcher1] total'):
continue
words = line.split()
if not (words[3] == 'unknown' and
words[5] == 'ok' and
words[7] == 'dark'):
                    self.fail('Syntax error in %r' % (line,))
try:
unk_cnt = int(words[4])
ok_cnt = int(words[6])
dark_cnt = int(words[8])
except ValueError:
                    self.fail('Bad value in %r' % (line,))
if unk_cnt != unk_exp or ok_cnt != ok_exp or dark_cnt != dark_exp:
fmt = 'Expected unknown %d ok %d dark %d, got %r, for nodes %r'
msg = fmt % (unk_exp, ok_exp, dark_exp,
' '.join(words[3:]), cur['cr'])
self.fail(msg=msg)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/obj/test_auditor.py |
# Copyright (c) 2013 - 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
from swift.common import utils
from swift.common.storage_policy import POLICIES
from swift.common.utils import Timestamp, md5
def write_diskfile(df, timestamp, data=b'test data', frag_index=None,
commit=True, legacy_durable=False, extra_metadata=None):
# Helper method to write some data and metadata to a diskfile.
# Optionally do not commit the diskfile, or commit but using a legacy
# durable file
with df.create() as writer:
writer.write(data)
metadata = {
'ETag': md5(data, usedforsecurity=False).hexdigest(),
'X-Timestamp': timestamp.internal,
'Content-Length': str(len(data)),
}
if extra_metadata:
metadata.update(extra_metadata)
if frag_index is not None:
metadata['X-Object-Sysmeta-Ec-Frag-Index'] = str(frag_index)
metadata['X-Object-Sysmeta-Ec-Etag'] = 'fake-etag'
writer.put(metadata)
if commit and legacy_durable:
# simulate legacy .durable file creation
durable_file = os.path.join(df._datadir,
timestamp.internal + '.durable')
with open(durable_file, 'wb'):
pass
elif commit:
writer.commit(timestamp)
# else: don't make it durable
return metadata
class BaseTest(unittest.TestCase):
def setUp(self):
self.device = 'dev'
self.partition = '9'
self.tmpdir = tempfile.mkdtemp()
# sender side setup
self.tx_testdir = os.path.join(self.tmpdir, 'tmp_test_ssync_sender')
utils.mkdirs(os.path.join(self.tx_testdir, self.device))
self.daemon_conf = {
'devices': self.tx_testdir,
'mount_check': 'false',
}
# daemon will be set in subclass setUp
self.daemon = None
def tearDown(self):
shutil.rmtree(self.tmpdir, ignore_errors=True)
def _make_diskfile(self, device='dev', partition='9',
account='a', container='c', obj='o', body=b'test',
extra_metadata=None, policy=None,
frag_index=None, timestamp=None, df_mgr=None,
commit=True, verify=True, **kwargs):
policy = policy or POLICIES.legacy
object_parts = account, container, obj
timestamp = Timestamp.now() if timestamp is None else timestamp
if df_mgr is None:
df_mgr = self.daemon._df_router[policy]
df = df_mgr.get_diskfile(
device, partition, *object_parts, policy=policy,
frag_index=frag_index, **kwargs)
write_diskfile(df, timestamp, data=body, extra_metadata=extra_metadata,
commit=commit)
if commit and verify:
# when we write and commit stub data, sanity check it's readable
# and not quarantined because of any validation check
with df.open():
self.assertEqual(b''.join(df.reader()), body)
# sanity checks
listing = os.listdir(df._datadir)
self.assertTrue(listing)
for filename in listing:
self.assertTrue(filename.startswith(timestamp.internal))
return df
def _make_open_diskfile(self, device='dev', partition='9',
account='a', container='c', obj='o', body=b'test',
extra_metadata=None, policy=None,
frag_index=None, timestamp=None, df_mgr=None,
commit=True, **kwargs):
df = self._make_diskfile(device, partition, account, container, obj,
body, extra_metadata, policy, frag_index,
timestamp, df_mgr, commit, **kwargs)
df.open()
return df
| swift-master | test/unit/obj/common.py |
# coding: utf-8
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.obj.server"""
import six.moves.cPickle as pickle
import datetime
import json
import errno
import operator
import os
import mock
import six
from six import StringIO
import unittest
import math
import random
from shutil import rmtree
from time import gmtime, strftime, time, struct_time
from tempfile import mkdtemp
from collections import defaultdict
from contextlib import contextmanager
from textwrap import dedent
from eventlet import sleep, spawn, wsgi, Timeout, tpool, greenthread
from eventlet.green import httplib
from swift import __version__ as swift_version
from swift.common.http import is_success
from test import listen_zero, BaseTestCase
from test.debug_logger import debug_logger
from test.unit import mocked_http_conn, \
make_timestamp_iter, DEFAULT_TEST_EC_TYPE, skip_if_no_xattrs, \
connect_tcp, readuntil2crlfs, patch_policies, encode_frag_archive_bodies, \
mock_check_drive
from swift.obj import server as object_server
from swift.obj import updater
from swift.obj import diskfile
from swift.common import utils, bufferedhttp
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \
NullLogger, storage_directory, public, replication, encode_timestamps, \
Timestamp, md5
from swift.common import constraints
from swift.common.request_helpers import get_reserved_name
from swift.common.swob import Request, WsgiBytesIO, \
HTTPRequestedRangeNotSatisfiable
from swift.common.splice import splice
from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy,
POLICIES, EC_POLICY)
from swift.common.exceptions import DiskFileDeviceUnavailable, \
DiskFileNoSpace, DiskFileQuarantined
from swift.common.wsgi import init_request_processor
def mock_time(*args, **kwargs):
return 5000.0
test_policies = [
StoragePolicy(0, name='zero', is_default=True),
ECStoragePolicy(1, name='one', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4),
]
@contextmanager
def fake_spawn():
"""
Spawn and capture the result so we can later wait on it. This means we can
test code executing in a greenthread but still wait() on the result to
ensure that the method has completed.
"""
greenlets = []
def _inner_fake_spawn(func, *a, **kw):
gt = greenthread.spawn(func, *a, **kw)
greenlets.append(gt)
return gt
object_server.spawn = _inner_fake_spawn
with mock.patch('swift.obj.server.spawn', _inner_fake_spawn):
try:
yield
finally:
for gt in greenlets:
gt.wait()
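# The object server sizes eventlet's tpool from its config; these tests
# check which tpool.set_num_threads() calls result from each setting.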
class TestTpoolSize(unittest.TestCase):
def test_default_config(self):
with mock.patch('eventlet.tpool.set_num_threads') as mock_snt:
object_server.ObjectController({})
self.assertEqual([], mock_snt.mock_calls)
def test_explicit_setting(self):
conf = {'eventlet_tpool_num_threads': '17'}
with mock.patch('eventlet.tpool.set_num_threads') as mock_snt:
object_server.ObjectController(conf)
self.assertEqual([mock.call(17)], mock_snt.mock_calls)
def test_servers_per_port_no_explicit_setting(self):
conf = {'servers_per_port': '3'}
with mock.patch('eventlet.tpool.set_num_threads') as mock_snt:
object_server.ObjectController(conf)
self.assertEqual([mock.call(1)], mock_snt.mock_calls)
def test_servers_per_port_with_explicit_setting(self):
conf = {'eventlet_tpool_num_threads': '17',
'servers_per_port': '3'}
with mock.patch('eventlet.tpool.set_num_threads') as mock_snt:
object_server.ObjectController(conf)
self.assertEqual([mock.call(17)], mock_snt.mock_calls)
def test_servers_per_port_empty(self):
# run_wsgi is robust to this, so we should be too
conf = {'servers_per_port': ''}
with mock.patch('eventlet.tpool.set_num_threads') as mock_snt:
object_server.ObjectController(conf)
self.assertEqual([], mock_snt.mock_calls)
@patch_policies(test_policies)
class TestObjectController(BaseTestCase):
"""Test swift.obj.server.ObjectController"""
def setUp(self):
"""Set up for testing swift.object.server.ObjectController"""
skip_if_no_xattrs()
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b'startcap'
self.tmpdir = mkdtemp()
self.testdir = os.path.join(self.tmpdir,
'tmp_test_object_server_ObjectController')
mkdirs(os.path.join(self.testdir, 'sda1'))
self.conf = {'devices': self.testdir, 'mount_check': 'false',
'container_update_timeout': 0.0}
self.logger = debug_logger('test-object-controller')
self.object_controller = object_server.ObjectController(
self.conf, logger=self.logger)
self.object_controller.bytes_per_sync = 1
self._orig_tpool_exc = tpool.execute
tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
self.df_mgr = diskfile.DiskFileManager(self.conf,
self.object_controller.logger)
self.ts = make_timestamp_iter()
self.ec_policies = [p for p in POLICIES if p.policy_type == EC_POLICY]
def tearDown(self):
"""Tear down for testing swift.object.server.ObjectController"""
rmtree(self.tmpdir)
tpool.execute = self._orig_tpool_exc
def _stage_tmp_dir(self, policy):
mkdirs(os.path.join(self.testdir, 'sda1',
diskfile.get_tmp_dir(policy)))
def iter_policies(self):
for policy in POLICIES:
self.policy = policy
yield policy
def test_init(self):
conf = {
'devices': self.testdir,
'mount_check': 'false',
'container_update_timeout': 0.0,
}
app = object_server.ObjectController(conf, logger=self.logger)
self.assertEqual(app.container_update_timeout, 0.0)
self.assertEqual(app.auto_create_account_prefix, '.')
self.assertEqual(self.logger.get_lines_for_level('warning'), [])
conf['auto_create_account_prefix'] = '-'
app = object_server.ObjectController(conf, logger=self.logger)
self.assertEqual(app.auto_create_account_prefix, '-')
self.assertEqual(self.logger.get_lines_for_level('warning'), [
'Option auto_create_account_prefix is deprecated. '
'Configure auto_create_account_prefix under the '
'swift-constraints section of swift.conf. This option '
'will be ignored in a future release.'
])
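# Helper that drives a single object through PUT/GET/POST/HEAD/DELETE,
# checking the expected status for each (or alt_res for error scenarios).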
def check_all_api_methods(self, obj_name='o', alt_res=None):
path = '/sda1/p/a/c/%s' % obj_name
body = b'SPECIAL_STRING'
op_table = {
"PUT": (body, alt_res or 201, b''), # create one
"GET": (b'', alt_res or 200, body), # check it
"POST": (b'', alt_res or 202, b''), # update it
"HEAD": (b'', alt_res or 200, b''), # head it
"DELETE": (b'', alt_res or 204, b'') # delete it
}
for method in ["PUT", "GET", "POST", "HEAD", "DELETE"]:
in_body, res, out_body = op_table[method]
timestamp = normalize_timestamp(time())
req = Request.blank(
path, environ={'REQUEST_METHOD': method},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = in_body
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, res)
if out_body and (200 <= res < 300):
self.assertEqual(resp.body, out_body)
def test_REQUEST_SPECIAL_CHARS(self):
obj = 'special昆%20/%'
if six.PY3:
# The path argument of Request.blank() is a WSGI string, somehow
obj = obj.encode('utf-8').decode('latin-1')
self.check_all_api_methods(obj)
def test_device_unavailable(self):
def raise_disk_unavail(*args, **kwargs):
raise DiskFileDeviceUnavailable()
self.object_controller.get_diskfile = raise_disk_unavail
self.check_all_api_methods(alt_res=507)
def test_allowed_headers(self):
dah = ['content-disposition', 'content-encoding', 'x-delete-at',
'x-object-manifest', 'x-static-large-object']
conf = {'devices': self.testdir, 'mount_check': 'false',
'allowed_headers': ','.join(['content-length'] + dah)}
self.object_controller = object_server.ObjectController(
conf, logger=debug_logger())
self.assertEqual(self.object_controller.allowed_headers, set(dah))
def test_POST_update_meta(self):
# Test swift.obj.server.ObjectController.POST
original_headers = self.object_controller.allowed_headers
test_headers = 'content-encoding foo bar'.split()
self.object_controller.allowed_headers = set(test_headers)
put_timestamp = normalize_timestamp(time())
headers = {'X-Timestamp': put_timestamp,
'Content-Type': 'application/x-test',
'Foo': 'fooheader',
'Baz': 'bazheader',
'X-Object-Sysmeta-Color': 'blue',
'X-Object-Transient-Sysmeta-Shape': 'circle',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'}
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
req.body = b'VERIFY'
etag = '"%s"' % md5(b'VERIFY', usedforsecurity=False).hexdigest()
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': str(len(resp.body)),
'Etag': etag,
})
post_timestamp = normalize_timestamp(time())
headers = {'X-Timestamp': post_timestamp,
'X-Object-Meta-3': 'Three',
'X-Object-Meta-4': 'Four',
'x-object-meta-t\xc3\xa8st': 'm\xc3\xa8ta',
'X-Backend-Replication-Headers':
'x-object-meta-t\xc3\xa8st',
'Content-Encoding': 'gzip',
'Foo': 'fooheader',
'Bar': 'barheader'}
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers=headers)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': str(len(resp.body)),
'X-Backend-Content-Type': 'application/x-test',
'X-Object-Sysmeta-Color': 'blue',
})
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
expected_headers = {
'Content-Type': 'application/x-test',
'Content-Length': '6',
'Etag': etag,
'X-Object-Sysmeta-Color': 'blue',
'X-Object-Meta-3': 'Three',
'X-Object-Meta-4': 'Four',
'X-Object-Meta-T\xc3\xa8St': 'm\xc3\xa8ta',
'Foo': 'fooheader',
'Bar': 'barheader',
'Content-Encoding': 'gzip',
'X-Backend-Timestamp': post_timestamp,
'X-Timestamp': post_timestamp,
'X-Backend-Data-Timestamp': put_timestamp,
'X-Backend-Durable-Timestamp': put_timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(post_timestamp)))),
}
self.assertEqual(dict(resp.headers), expected_headers)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(dict(resp.headers), expected_headers)
post_timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': post_timestamp,
'X-Object-Sysmeta-Color': 'red',
'Content-Type': 'application/x-test2'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': str(len(resp.body)),
'X-Backend-Content-Type': 'application/x-test2',
'X-Object-Sysmeta-Color': 'blue',
})
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'application/x-test2',
'Content-Length': '6',
'Etag': etag,
'X-Object-Sysmeta-Color': 'blue',
'X-Backend-Timestamp': post_timestamp,
'X-Timestamp': post_timestamp,
'X-Backend-Data-Timestamp': put_timestamp,
'X-Backend-Durable-Timestamp': put_timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(post_timestamp)))),
})
# test defaults
self.object_controller.allowed_headers = original_headers
put_timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': put_timestamp,
'Content-Type': 'application/x-test',
'Foo': 'fooheader',
'X-Object-Sysmeta-Color': 'red',
'X-Object-Meta-1': 'One',
'X-Object-Manifest': 'c/bar',
'Content-Encoding': 'gzip',
'Content-Disposition': 'bar',
'X-Static-Large-Object': 'True',
})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': str(len(resp.body)),
'Etag': etag,
})
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'application/x-test',
'Content-Length': '6',
'Etag': etag,
'X-Object-Sysmeta-Color': 'red',
'X-Object-Meta-1': 'One',
'Content-Encoding': 'gzip',
'X-Object-Manifest': 'c/bar',
'Content-Disposition': 'bar',
'X-Static-Large-Object': 'True',
'X-Backend-Timestamp': put_timestamp,
'X-Timestamp': put_timestamp,
'X-Backend-Data-Timestamp': put_timestamp,
'X-Backend-Durable-Timestamp': put_timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(put_timestamp)))),
})
post_timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': post_timestamp,
'X-Object-Meta-3': 'Three',
'Foo': 'fooheader',
'Content-Type': 'application/x-test'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': str(len(resp.body)),
'X-Backend-Content-Type': 'application/x-test',
'X-Object-Sysmeta-Color': 'red',
})
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'application/x-test',
'Content-Length': '6',
'Etag': etag,
'X-Object-Sysmeta-Color': 'red',
'X-Object-Meta-3': 'Three',
'X-Static-Large-Object': 'True',
'X-Backend-Timestamp': post_timestamp,
'X-Timestamp': post_timestamp,
'X-Backend-Data-Timestamp': put_timestamp,
'X-Backend-Durable-Timestamp': put_timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(post_timestamp)))),
})
# Test for empty metadata
post_timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': post_timestamp,
'Content-Type': 'application/x-test',
'X-Object-Meta-3': ''})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': str(len(resp.body)),
'X-Backend-Content-Type': 'application/x-test',
'X-Object-Sysmeta-Color': 'red',
})
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'application/x-test',
'Content-Length': '6',
'Etag': etag,
'X-Object-Sysmeta-Color': 'red',
'X-Object-Meta-3': '',
'X-Static-Large-Object': 'True',
'X-Backend-Timestamp': post_timestamp,
'X-Timestamp': post_timestamp,
'X-Backend-Data-Timestamp': put_timestamp,
'X-Backend-Durable-Timestamp': put_timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(post_timestamp)))),
})
def test_POST_old_timestamp(self):
ts = time()
orig_timestamp = utils.Timestamp(ts).internal
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': orig_timestamp,
'Content-Type': 'application/x-test',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# Same timestamp should result in 409
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': orig_timestamp,
'X-Object-Meta-3': 'Three',
'X-Object-Meta-4': 'Four',
'Content-Encoding': 'gzip',
'Content-Type': 'application/x-test'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp)
# Earlier timestamp should result in 409
timestamp = normalize_timestamp(ts - 1)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp,
'X-Object-Meta-5': 'Five',
'X-Object-Meta-6': 'Six',
'Content-Encoding': 'gzip',
'Content-Type': 'application/x-test'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp)
def test_POST_conflicts_with_later_POST(self):
t_put = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': t_put,
'Content-Length': 0,
'Content-Type': 'plain/text'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
t_post1 = next(self.ts).internal
t_post2 = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': t_post2})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': t_post1})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
obj_dir = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')))
ts_file = os.path.join(obj_dir, t_post2 + '.meta')
self.assertTrue(os.path.isfile(ts_file))
meta_file = os.path.join(obj_dir, t_post1 + '.meta')
self.assertFalse(os.path.isfile(meta_file))
def test_POST_not_exist(self):
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/fail',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp,
'X-Object-Meta-1': 'One',
'X-Object-Meta-2': 'Two',
'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
def test_POST_invalid_path(self):
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp,
'X-Object-Meta-1': 'One',
'X-Object-Meta-2': 'Two',
'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_POST_no_timestamp(self):
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Object-Meta-1': 'One',
'X-Object-Meta-2': 'Two',
'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_POST_bad_timestamp(self):
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': 'bad',
'X-Object-Meta-1': 'One',
'X-Object-Meta-2': 'Two',
'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_POST_container_connection(self):
# Test that POST does call container_update and returns success
# whether update to container server succeeds or fails
def mock_http_connect(calls, response, with_exc=False):
class FakeConn(object):
def __init__(self, calls, status, with_exc):
self.calls = calls
self.status = status
self.reason = 'Fake'
self.host = '1.2.3.4'
self.port = '1234'
self.with_exc = with_exc
def getresponse(self):
calls[0] += 1
if self.with_exc:
raise Exception('test')
return self
def read(self, amt=None):
return b''
return lambda *args, **kwargs: FakeConn(calls, response, with_exc)
ts = time()
timestamp = normalize_timestamp(ts)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Content-Length': '0'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(ts + 1),
'X-Container-Host': '1.2.3.4:0',
'X-Container-Partition': '3',
'X-Container-Device': 'sda1',
'X-Container-Timestamp': '1',
'Content-Type': 'application/new1'})
calls = [0]
with mock.patch.object(object_server, 'http_connect',
mock_http_connect(calls, 202)):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(ts + 2),
'X-Container-Host': '1.2.3.4:0',
'X-Container-Partition': '3',
'X-Container-Device': 'sda1',
'X-Container-Timestamp': '1',
'Content-Type': 'application/new1'})
calls = [0]
with mock.patch.object(object_server, 'http_connect',
mock_http_connect(calls, 202, with_exc=True)):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(ts + 3),
'X-Container-Host': '1.2.3.4:0',
'X-Container-Partition': '3',
'X-Container-Device': 'sda1',
'X-Container-Timestamp': '1',
'Content-Type': 'application/new2'})
calls = [0]
with mock.patch.object(object_server, 'http_connect',
mock_http_connect(calls, 500)):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
def _test_POST_container_updates(self, policy, update_etag=None):
# Test that POST requests result in correct calls to container_update
t = [next(self.ts) for _ in range(0, 5)]
calls_made = []
update_etag = update_etag or '098f6bcd4621d373cade4e832627b4f6'
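# the default etag above is md5('test'), matching the 4-byte body used below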
def mock_container_update(ctlr, op, account, container, obj, request,
headers_out, objdevice, policy):
calls_made.append((headers_out, policy))
body = 'test'
headers = {
'X-Timestamp': t[1].internal,
'Content-Type': 'application/octet-stream;swift_bytes=123456789',
'X-Backend-Storage-Policy-Index': int(policy)}
if policy.policy_type == EC_POLICY:
# EC fragments will typically have a different size to the body and
# for small bodies the fragments may be longer. For this test all
# that matters is that the fragment and body lengths differ.
body = body + 'ec_overhead'
headers['X-Backend-Container-Update-Override-Etag'] = update_etag
headers['X-Backend-Container-Update-Override-Size'] = '4'
headers['X-Object-Sysmeta-Ec-Etag'] = update_etag
headers['X-Object-Sysmeta-Ec-Content-Length'] = '4'
headers['X-Object-Sysmeta-Ec-Frag-Index'] = 2
headers['Content-Length'] = str(len(body))
req = Request.blank('/sda1/p/a/c/o', body=body,
environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(1, len(calls_made))
expected_headers = HeaderKeyDict({
'x-size': '4',
'x-content-type': 'application/octet-stream;swift_bytes=123456789',
'x-timestamp': t[1].internal,
'x-etag': update_etag})
self.assertDictEqual(expected_headers, calls_made[0][0])
self.assertEqual(policy, calls_made[0][1])
# POST with no metadata newer than the data should return 409,
# container update not expected
calls_made = []
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': t[0].internal,
'X-Backend-Storage-Policy-Index': int(policy)})
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['x-backend-timestamp'],
t[1].internal)
self.assertEqual(0, len(calls_made))
# POST with newer metadata returns success and container update
# is expected
calls_made = []
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': t[3].internal,
'X-Backend-Storage-Policy-Index': int(policy)})
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(1, len(calls_made))
expected_headers = HeaderKeyDict({
'x-size': '4',
'x-content-type': 'application/octet-stream;swift_bytes=123456789',
'x-timestamp': t[1].internal,
'x-content-type-timestamp': t[1].internal,
'x-meta-timestamp': t[3].internal,
'x-etag': update_etag})
self.assertDictEqual(expected_headers, calls_made[0][0])
self.assertEqual(policy, calls_made[0][1])
# POST with no metadata newer than existing metadata should return
# 409, container update not expected
calls_made = []
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': t[2].internal,
'X-Backend-Storage-Policy-Index': int(policy)})
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['x-backend-timestamp'],
t[3].internal)
self.assertEqual(0, len(calls_made))
# POST with newer content-type but older metadata returns success
# and a container update is expected; the newer content-type should
# have the existing swift_bytes parameter appended
calls_made = []
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={
'X-Timestamp': t[2].internal,
'Content-Type': 'text/plain',
'Content-Type-Timestamp': t[2].internal,
'X-Backend-Storage-Policy-Index': int(policy)
})
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(1, len(calls_made))
expected_headers = HeaderKeyDict({
'x-size': '4',
'x-content-type': 'text/plain;swift_bytes=123456789',
'x-timestamp': t[1].internal,
'x-content-type-timestamp': t[2].internal,
'x-meta-timestamp': t[3].internal,
'x-etag': update_etag})
self.assertDictEqual(expected_headers, calls_made[0][0])
self.assertEqual(policy, calls_made[0][1])
# POST with older content-type but newer metadata returns success
# and container update is expected
calls_made = []
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={
'X-Timestamp': t[4].internal,
'Content-Type': 'older',
'Content-Type-Timestamp': t[1].internal,
'X-Backend-Storage-Policy-Index': int(policy)
})
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(1, len(calls_made))
expected_headers = HeaderKeyDict({
'x-size': '4',
'x-content-type': 'text/plain;swift_bytes=123456789',
'x-timestamp': t[1].internal,
'x-content-type-timestamp': t[2].internal,
'x-meta-timestamp': t[4].internal,
'x-etag': update_etag})
self.assertDictEqual(expected_headers, calls_made[0][0])
self.assertEqual(policy, calls_made[0][1])
# POST with same-time content-type and metadata returns 409
# and no container update is expected
calls_made = []
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={
'X-Timestamp': t[4].internal,
'Content-Type': 'ignored',
'Content-Type-Timestamp': t[2].internal,
'X-Backend-Storage-Policy-Index': int(policy)
})
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(0, len(calls_made))
# POST with an implicitly newer content-type but older metadata
# returns success and a container update is expected; the update
# reports the existing metadata timestamp
calls_made = []
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={
'X-Timestamp': t[3].internal,
'Content-Type': 'text/newer',
'X-Backend-Storage-Policy-Index': int(policy)
})
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(1, len(calls_made))
expected_headers = HeaderKeyDict({
'x-size': '4',
'x-content-type': 'text/newer;swift_bytes=123456789',
'x-timestamp': t[1].internal,
'x-content-type-timestamp': t[3].internal,
'x-meta-timestamp': t[4].internal,
'x-etag': update_etag})
self.assertDictEqual(expected_headers, calls_made[0][0])
self.assertEqual(policy, calls_made[0][1])
def test_POST_container_updates_with_replication_policy(self):
self._test_POST_container_updates(POLICIES[0])
def test_POST_container_updates_with_EC_policy(self):
self._test_POST_container_updates(
POLICIES[1], update_etag='override_etag')
def test_POST_container_updates_precedence(self):
# Verify that the correct etag and size are sent with container
# updates for a PUT and for a subsequent POST.
def do_test(body, headers, policy):
def mock_container_update(ctlr, op, account, container, obj, req,
headers_out, objdevice, policy):
calls_made.append((headers_out, policy))
calls_made = []
ts_put = next(self.ts)
# make PUT with given headers and verify correct etag is sent in
# container update
headers.update({
'Content-Type':
'application/octet-stream;swift_bytes=123456789',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Object-Sysmeta-Ec-Frag-Index': 2,
'X-Timestamp': ts_put.internal,
'Content-Length': len(body)})
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=headers, body=body)
with mock.patch(
'swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(1, len(calls_made))
expected_headers = HeaderKeyDict({
'x-size': '4',
'x-content-type':
'application/octet-stream;swift_bytes=123456789',
'x-timestamp': ts_put.internal,
'x-etag': 'expected'})
self.assertDictEqual(expected_headers, calls_made[0][0])
self.assertEqual(policy, calls_made[0][1])
# make a POST and verify container update has the same etag
calls_made = []
ts_post = next(self.ts)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': ts_post.internal,
'X-Backend-Storage-Policy-Index': int(policy)})
with mock.patch(
'swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(1, len(calls_made))
expected_headers.update({
'x-content-type-timestamp': ts_put.internal,
'x-meta-timestamp': ts_post.internal})
self.assertDictEqual(expected_headers, calls_made[0][0])
self.assertEqual(policy, calls_made[0][1])
# sanity check - EC headers are ok
headers = {
'X-Backend-Container-Update-Override-Etag': 'expected',
'X-Backend-Container-Update-Override-Size': '4',
'X-Object-Sysmeta-Ec-Etag': 'expected',
'X-Object-Sysmeta-Ec-Content-Length': '4'}
do_test('test ec frag longer than 4', headers, POLICIES[1])
# middleware overrides take precedence over EC/older overrides
headers = {
'X-Backend-Container-Update-Override-Etag': 'unexpected',
'X-Backend-Container-Update-Override-Size': '3',
'X-Object-Sysmeta-Ec-Etag': 'unexpected',
'X-Object-Sysmeta-Ec-Content-Length': '3',
'X-Object-Sysmeta-Container-Update-Override-Etag': 'expected',
'X-Object-Sysmeta-Container-Update-Override-Size': '4'}
do_test('test ec frag longer than 4', headers, POLICIES[1])
# overrides with replication policy
headers = {
'X-Object-Sysmeta-Container-Update-Override-Etag': 'expected',
'X-Object-Sysmeta-Container-Update-Override-Size': '4'}
do_test('longer than 4', headers, POLICIES[0])
# middleware overrides take precedence over EC/older overrides with
# replication policy
headers = {
'X-Backend-Container-Update-Override-Etag': 'unexpected',
'X-Backend-Container-Update-Override-Size': '3',
'X-Object-Sysmeta-Container-Update-Override-Etag': 'expected',
'X-Object-Sysmeta-Container-Update-Override-Size': '4'}
do_test('longer than 4', headers, POLICIES[0])
def _test_PUT_then_POST_async_pendings(self, policy, update_etag=None):
# Test that PUT and POST requests result in distinct async pending
# files when sync container update fails.
def fake_http_connect(*args):
raise Exception('test')
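# raising from http_connect makes the synchronous container update fail,
# so the object server falls back to writing an async_pending file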
device_dir = os.path.join(self.testdir, 'sda1')
t_put = next(self.ts)
update_etag = update_etag or '098f6bcd4621d373cade4e832627b4f6'
put_headers = {
'X-Trans-Id': 'put_trans_id',
'X-Timestamp': t_put.internal,
'Content-Type': 'application/octet-stream;swift_bytes=123456789',
'Content-Length': '4',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Container-Host': 'chost:cport',
'X-Container-Partition': 'cpartition',
'X-Container-Device': 'cdevice'}
if policy.policy_type == EC_POLICY:
put_headers.update({
'X-Object-Sysmeta-Ec-Frag-Index': '2',
'X-Backend-Container-Update-Override-Etag': update_etag,
'X-Object-Sysmeta-Ec-Etag': update_etag})
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=put_headers, body='test')
with mock.patch('swift.obj.server.http_connect', fake_http_connect), \
mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''), \
fake_spawn():
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
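# the failed update should now be recorded as an async_pending file, named
# <hash_path(a, c, o)>-<put timestamp> in a suffix subdir of the policy's
# async dir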
async_pending_file_put = os.path.join(
device_dir, diskfile.get_async_dir(policy), 'a83',
'06fbf0b514e5199dfc4e00f42eb5ea83-%s' % t_put.internal)
self.assertTrue(os.path.isfile(async_pending_file_put),
'Expected %s to be a file but it is not.'
% async_pending_file_put)
expected_put_headers = {
'Referer': 'PUT http://localhost/sda1/p/a/c/o',
'X-Trans-Id': 'put_trans_id',
'X-Timestamp': t_put.internal,
'X-Content-Type': 'application/octet-stream;swift_bytes=123456789',
'X-Size': '4',
'X-Etag': '098f6bcd4621d373cade4e832627b4f6',
'User-Agent': 'object-server %s' % os.getpid(),
'X-Backend-Storage-Policy-Index': '%d' % int(policy)}
if policy.policy_type == EC_POLICY:
expected_put_headers['X-Etag'] = update_etag
self.assertDictEqual(
pickle.load(open(async_pending_file_put, 'rb')),
{'headers': expected_put_headers,
'account': 'a', 'container': 'c', 'obj': 'o', 'op': 'PUT'})
# POST with newer metadata returns success and container update
# is expected
t_post = next(self.ts)
post_headers = {
'X-Trans-Id': 'post_trans_id',
'X-Timestamp': t_post.internal,
'Content-Type': 'application/other',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Container-Host': 'chost:cport',
'X-Container-Partition': 'cpartition',
'X-Container-Device': 'cdevice'}
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers=post_headers)
with mock.patch('swift.obj.server.http_connect', fake_http_connect), \
mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''), \
fake_spawn():
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.maxDiff = None
# check async pending file for PUT is still intact
self.assertDictEqual(
pickle.load(open(async_pending_file_put, 'rb')),
{'headers': expected_put_headers,
'account': 'a', 'container': 'c', 'obj': 'o', 'op': 'PUT'})
# check distinct async pending file for POST
async_pending_file_post = os.path.join(
device_dir, diskfile.get_async_dir(policy), 'a83',
'06fbf0b514e5199dfc4e00f42eb5ea83-%s' % t_post.internal)
self.assertTrue(os.path.isfile(async_pending_file_post),
'Expected %s to be a file but it is not.'
% async_pending_file_post)
expected_post_headers = {
'Referer': 'POST http://localhost/sda1/p/a/c/o',
'X-Trans-Id': 'post_trans_id',
'X-Timestamp': t_put.internal,
'X-Content-Type': 'application/other;swift_bytes=123456789',
'X-Size': '4',
'X-Etag': '098f6bcd4621d373cade4e832627b4f6',
'User-Agent': 'object-server %s' % os.getpid(),
'X-Backend-Storage-Policy-Index': '%d' % int(policy),
'X-Meta-Timestamp': t_post.internal,
'X-Content-Type-Timestamp': t_post.internal,
}
if policy.policy_type == EC_POLICY:
expected_post_headers['X-Etag'] = update_etag
self.assertDictEqual(
pickle.load(open(async_pending_file_post, 'rb')),
{'headers': expected_post_headers,
'account': 'a', 'container': 'c', 'obj': 'o', 'op': 'PUT'})
# verify that only the POST (most recent) async update gets sent by the
# object updater, and that both update files are deleted
with mock.patch(
'swift.obj.updater.ObjectUpdater.object_update') as mock_update, \
mock.patch('swift.obj.updater.dump_recon_cache'):
object_updater = updater.ObjectUpdater(
{'devices': self.testdir,
'mount_check': 'false'}, logger=debug_logger())
node = {'id': 1}
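# stub container ring: every get_nodes() lookup returns partition 99 with
# this single node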
mock_ring = mock.MagicMock()
mock_ring.get_nodes.return_value = (99, [node])
object_updater.container_ring = mock_ring
mock_update.return_value = (True, 1, None)
object_updater.run_once()
self.assertEqual(1, mock_update.call_count)
self.assertEqual((node, 99, 'PUT', '/a/c/o'),
mock_update.call_args_list[0][0][0:4])
actual_headers = mock_update.call_args_list[0][0][4]
# User-Agent is updated.
expected_post_headers['User-Agent'] = 'object-updater %s' % os.getpid()
expected_post_headers['X-Backend-Accept-Redirect'] = 'true'
expected_post_headers['X-Backend-Accept-Quoted-Location'] = 'true'
self.assertDictEqual(expected_post_headers, actual_headers)
self.assertFalse(
os.listdir(os.path.join(
device_dir, diskfile.get_async_dir(policy))))
def test_PUT_then_POST_async_pendings_with_repl_policy(self):
self._test_PUT_then_POST_async_pendings(POLICIES[0])
def test_PUT_then_POST_async_pendings_with_EC_policy(self):
self._test_PUT_then_POST_async_pendings(
POLICIES[1], update_etag='override_etag')
def _check_PUT_redirected_async_pending(self, container_path=None,
old_style=False):
# When the container update is redirected, verify that the redirect
# location is persisted in the async pending file.
policy = POLICIES[0]
device_dir = os.path.join(self.testdir, 'sda1')
t_put = next(self.ts)
update_etag = '098f6bcd4621d373cade4e832627b4f6'
put_headers = {
'X-Trans-Id': 'put_trans_id',
'X-Timestamp': t_put.internal,
'Content-Type': 'application/octet-stream;swift_bytes=123456789',
'Content-Length': '4',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Container-Host': 'chost:3200',
'X-Container-Partition': '99',
'X-Container-Device': 'cdevice'}
if container_path:
# the proxy may include either header
hdr = ('X-Backend-Container-Path' if old_style
else 'X-Backend-Quoted-Container-Path')
put_headers[hdr] = container_path
expected_update_path = '/cdevice/99/%s/o' % container_path
else:
expected_update_path = '/cdevice/99/a/c/o'
if policy.policy_type == EC_POLICY:
put_headers.update({
'X-Object-Sysmeta-Ec-Frag-Index': '2',
'X-Backend-Container-Update-Override-Etag': update_etag,
'X-Object-Sysmeta-Ec-Etag': update_etag})
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=put_headers, body=b'test')
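# the container server responds 301 with a Location naming a shard
# container; the object server should persist that redirect target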
resp_headers = {'Location': '/.sharded_a/c_shard_1/o',
'X-Backend-Redirect-Timestamp': next(self.ts).internal}
with mocked_http_conn(301, headers=[resp_headers]) as conn, \
mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''),\
fake_spawn():
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(1, len(conn.requests))
self.assertEqual(expected_update_path, conn.requests[0]['path'])
# whether or not an X-Backend-Container-Path was received from the
# proxy, the async pending file should now have the container_path
# equal to the Location header received in the update response.
async_pending_file_put = os.path.join(
device_dir, diskfile.get_async_dir(policy), 'a83',
'06fbf0b514e5199dfc4e00f42eb5ea83-%s' % t_put.internal)
self.assertTrue(os.path.isfile(async_pending_file_put),
'Expected %s to be a file but it is not.'
% async_pending_file_put)
expected_put_headers = {
'Referer': 'PUT http://localhost/sda1/p/a/c/o',
'X-Trans-Id': 'put_trans_id',
'X-Timestamp': t_put.internal,
'X-Content-Type': 'application/octet-stream;swift_bytes=123456789',
'X-Size': '4',
'X-Etag': '098f6bcd4621d373cade4e832627b4f6',
'User-Agent': 'object-server %s' % os.getpid(),
'X-Backend-Storage-Policy-Index': '%d' % int(policy)}
if policy.policy_type == EC_POLICY:
expected_put_headers['X-Etag'] = update_etag
self.assertEqual(
{'headers': expected_put_headers,
'account': 'a', 'container': 'c', 'obj': 'o', 'op': 'PUT',
'container_path': '.sharded_a/c_shard_1'},
pickle.load(open(async_pending_file_put, 'rb')))
# when updater is run its first request will be to the redirect
# location that is persisted in the async pending file
with mocked_http_conn(201) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache',
lambda *args: None):
object_updater = updater.ObjectUpdater(
{'devices': self.testdir,
'mount_check': 'false'}, logger=debug_logger())
node = {'id': 1, 'ip': 'chost', 'port': 3200,
'replication_ip': 'chost_repl',
'replication_port': 6200,
'device': 'cdevice'}
mock_ring = mock.MagicMock()
mock_ring.get_nodes.return_value = (99, [node])
object_updater.container_ring = mock_ring
object_updater.run_once()
self.assertEqual(1, len(conn.requests))
self.assertEqual('/cdevice/99/.sharded_a/c_shard_1/o',
conn.requests[0]['path'])
self.assertEqual(6200, conn.requests[0]['port'])
self.assertEqual('chost_repl', conn.requests[0]['ip'])
def test_PUT_redirected_async_pending(self):
self._check_PUT_redirected_async_pending()
def test_PUT_redirected_async_pending_with_container_path(self):
self._check_PUT_redirected_async_pending(container_path='.another/c')
def test_PUT_redirected_async_pending_with_old_style_container_path(self):
self._check_PUT_redirected_async_pending(
container_path='.another/c', old_style=True)
def test_POST_quarantine_zbyte(self):
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
policy=POLICIES.legacy)
objfile.open()
file_name = os.path.basename(objfile._data_file)
with open(objfile._data_file) as fp:
metadata = diskfile.read_metadata(fp)
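# rewrite the data file with zero bytes of content but intact metadata;
# opening it during the POST should quarantine the object and return 404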
os.unlink(objfile._data_file)
with open(objfile._data_file, 'w') as fp:
diskfile.write_metadata(fp, metadata)
self.assertEqual(os.listdir(objfile._datadir)[0], file_name)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(time())})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
quar_dir = os.path.join(
self.testdir, 'sda1', 'quarantined', 'objects',
os.path.basename(os.path.dirname(objfile._data_file)))
self.assertEqual(os.listdir(quar_dir)[0], file_name)
def test_PUT_invalid_path(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_no_timestamp(self):
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT',
'CONTENT_LENGTH': '0'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_bad_timestamp(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 'bad'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_no_content_type(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '6'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_invalid_content_type(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '6',
'Content-Type': '\xff\xff'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
self.assertTrue(b'Content-Type' in resp.body)
def test_PUT_no_content_length(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
del req.headers['Content-Length']
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 411)
def test_PUT_zero_content_length(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'application/octet-stream'})
req.body = ''
self.assertEqual(req.headers['Content-Length'], '0')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
def test_PUT_bad_transfer_encoding(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
req.headers['Transfer-Encoding'] = 'bad'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_if_none_match_star(self):
# First PUT should succeed
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': next(self.ts).normal,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'If-None-Match': '*'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# File should already exist so it should fail
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': next(self.ts).normal,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'If-None-Match': '*'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': next(self.ts).normal})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': next(self.ts).normal,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'If-None-Match': '*'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
def test_PUT_if_none_match(self):
# PUT with if-none-match set and nothing there should succeed
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'If-None-Match': 'notthere'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# PUT with if-none-match of the object etag should fail
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'If-None-Match': '0b4c12d7e0a73840c1c4f148fda3b037'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
def test_PUT_if_none_match_but_expired(self):
initial_put = next(self.ts)
put_before_expire = next(self.ts)
delete_at_timestamp = int(next(self.ts))
put_after_expire = next(self.ts)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
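# expiring objects are tracked in containers named by bucketing the
# delete-at time with the container divisor (computed above)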
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': initial_put.normal,
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# PUT again before object has expired should fail
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': put_before_expire.normal,
'Content-Length': '4',
'Content-Type': 'application/octet-stream',
'If-None-Match': '*'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
# PUT again after object has expired should succeed
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': put_after_expire.normal,
'Content-Length': '4',
'Content-Type': 'application/octet-stream',
'If-None-Match': '*'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
def test_PUT_common(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'x-object-meta-test': 'one',
'x-object-meta-t\xc3\xa8st': 'm\xc3\xa8ta',
'Custom-Header': '*',
'X-Backend-Replication-Headers':
'x-object-meta-t\xc3\xa8st Content-Type Content-Length'})
req.body = 'VERIFY'
with mock.patch.object(self.object_controller, 'allowed_headers',
['Custom-Header']):
self.object_controller.allowed_headers = ['Custom-Header']
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]),
'p', hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY')
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': utils.Timestamp(timestamp).internal,
'Content-Length': '6',
'ETag': '0b4c12d7e0a73840c1c4f148fda3b037',
'Content-Type': 'application/octet-stream',
'name': '/a/c/o',
'X-Object-Meta-Test': 'one',
'X-Object-Meta-T\xc3\xa8St': 'm\xc3\xa8ta',
'Custom-Header': '*'})
def test_PUT_overwrite(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '6',
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
sleep(.00001)
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Content-Encoding': 'gzip'})
req.body = 'VERIFY TWO'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY TWO')
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': utils.Timestamp(timestamp).internal,
'Content-Length': '10',
'ETag': 'b381a4c5dab1eaa1eb9711fa647cd039',
'Content-Type': 'text/plain',
'name': '/a/c/o',
'Content-Encoding': 'gzip'})
def test_PUT_overwrite_to_older_ts_success(self):
old_timestamp = next(self.ts)
new_timestamp = next(self.ts)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': old_timestamp.normal,
'Content-Length': '0',
'Content-Type': 'application/octet-stream'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': new_timestamp.normal,
'Content-Type': 'text/plain',
'Content-Encoding': 'gzip'})
req.body = 'VERIFY TWO'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
new_timestamp.internal + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY TWO')
self.assertEqual(
diskfile.read_metadata(objfile),
{'X-Timestamp': new_timestamp.internal,
'Content-Length': '10',
'ETag': 'b381a4c5dab1eaa1eb9711fa647cd039',
'Content-Type': 'text/plain',
'name': '/a/c/o',
'Content-Encoding': 'gzip'})
def test_PUT_overwrite_to_newer_ts_failed(self):
old_timestamp = next(self.ts)
new_timestamp = next(self.ts)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': new_timestamp.normal,
'Content-Length': '0',
'Content-Type': 'application/octet-stream'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': old_timestamp.normal,
'Content-Type': 'text/plain',
'Content-Encoding': 'gzip'})
req.body = 'VERIFY TWO'
with mock.patch(
'swift.obj.diskfile.BaseDiskFile.create') as mock_create:
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(mock_create.call_count, 0)
# the data file should not exist (sanity check: if a .data file had
# been written unexpectedly it would have been removed by
# cleanup_ondisk_files)
datafile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
old_timestamp.internal + '.data')
self.assertFalse(os.path.exists(datafile))
# the tombstone (.ts) file from the newer DELETE still exists
tsfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
new_timestamp.internal + '.ts')
self.assertTrue(os.path.isfile(tsfile))
def test_PUT_overwrite_w_delete_at(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'X-Delete-At': 9999999999,
'Content-Length': '6',
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
sleep(.00001)
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Content-Encoding': 'gzip'})
req.body = 'VERIFY TWO'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY TWO')
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': utils.Timestamp(timestamp).internal,
'Content-Length': '10',
'ETag': 'b381a4c5dab1eaa1eb9711fa647cd039',
'Content-Type': 'text/plain',
'name': '/a/c/o',
'Content-Encoding': 'gzip'})
def test_PUT_old_timestamp(self):
ts = time()
orig_timestamp = utils.Timestamp(ts).internal
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': orig_timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(ts),
'Content-Type': 'text/plain',
'Content-Encoding': 'gzip'})
req.body = 'VERIFY TWO'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': normalize_timestamp(ts - 1),
'Content-Type': 'text/plain',
'Content-Encoding': 'gzip'})
req.body = 'VERIFY THREE'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp)
def test_PUT_new_object_really_old_timestamp(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '-1', # 1969-12-31 23:59:59
'Content-Length': '6',
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '1', # 1970-01-01 00:00:01
'Content-Length': '6',
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
def test_PUT_object_really_new_timestamp(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '9999999999', # 2286-11-20 17:46:40
'Content-Length': '6',
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# roll over to 11 digits before the decimal
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '10000000000',
'Content-Length': '6',
'Content-Type': 'application/octet-stream'})
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_no_etag(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'text/plain'})
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
def test_PUT_invalid_etag(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'text/plain',
'ETag': 'invalid'})
req.body = 'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 422)
def test_PUT_user_metadata(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'ETag': 'b114ab7b90d9ccac4bd5d99cc7ebb568',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = 'VERIFY THREE'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY THREE')
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': utils.Timestamp(timestamp).internal,
'Content-Length': '12',
'ETag': 'b114ab7b90d9ccac4bd5d99cc7ebb568',
'Content-Type': 'text/plain',
'name': '/a/c/o',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
def test_PUT_etag_in_footer(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'Etag': 'other-etag',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
environ={'REQUEST_METHOD': 'PUT'})
obj_etag = md5(b"obj data", usedforsecurity=False).hexdigest()
footer_meta = json.dumps({"Etag": obj_etag}).encode('ascii')
footer_meta_cksum = md5(
footer_meta, usedforsecurity=False).hexdigest().encode('ascii')
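# build the MIME document the proxy sends when using metadata footers:
# the object data comes first, then a footer part whose JSON metadata
# (here carrying the real Etag) must match its Content-MD5 header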
req.body = b"\r\n".join((
b"--boundary",
b"",
b"obj data",
b"--boundary",
b"Content-MD5: " + footer_meta_cksum,
b"",
footer_meta,
b"--boundary--",
))
req.headers.pop("Content-Length", None)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.etag, obj_etag)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
with open(objfile) as fh:
self.assertEqual(fh.read(), "obj data")
def _check_container_override_etag_preference(self, override_headers,
override_footers):
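# PUT an object supplying container-update override etags via the given
# headers and/or footers, then assert that the resulting container update
# carried X-Etag: 'update-etag'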
def mock_container_update(ctlr, op, account, container, obj, req,
headers_out, objdevice, policy):
calls_made.append((headers_out, policy))
calls_made = []
ts_put = next(self.ts)
headers = {
'X-Timestamp': ts_put.internal,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'Etag': 'other-etag',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'}
headers.update(override_headers)
req = Request.blank(
'/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'PUT'})
obj_etag = md5(b"obj data", usedforsecurity=False).hexdigest()
footers = {'Etag': obj_etag}
footers.update(override_footers)
footer_meta = json.dumps(footers).encode('ascii')
footer_meta_cksum = md5(
footer_meta, usedforsecurity=False).hexdigest().encode('ascii')
req.body = b"\r\n".join((
b"--boundary",
b"",
b"obj data",
b"--boundary",
b"Content-MD5: " + footer_meta_cksum,
b"",
footer_meta,
b"--boundary--",
))
req.headers.pop("Content-Length", None)
with mock.patch(
'swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.etag, obj_etag)
self.assertEqual(resp.status_int, 201)
self.assertEqual(1, len(calls_made))
self.assertEqual({
'X-Size': str(len('obj data')),
'X-Etag': 'update-etag',
'X-Content-Type': 'text/plain',
'X-Timestamp': ts_put.internal,
}, calls_made[0][0])
self.assertEqual(POLICIES[0], calls_made[0][1])
def test_override_etag_lone_header_footer(self):
self._check_container_override_etag_preference(
{'X-Backend-Container-Update-Override-Etag': 'update-etag'}, {})
self._check_container_override_etag_preference(
{}, {'X-Backend-Container-Update-Override-Etag': 'update-etag'})
self._check_container_override_etag_preference(
{'X-Object-Sysmeta-Container-Update-Override-Etag':
'update-etag'}, {})
self._check_container_override_etag_preference(
{}, {'X-Object-Sysmeta-Container-Update-Override-Etag':
'update-etag'})
def test_override_etag_footer_trumps_header(self):
self._check_container_override_etag_preference(
{'X-Backend-Container-Update-Override-Etag': 'ignored-etag'},
{'X-Backend-Container-Update-Override-Etag': 'update-etag'})
self._check_container_override_etag_preference(
{'X-Object-Sysmeta-Container-Update-Override-Etag':
'ignored-etag'},
{'X-Object-Sysmeta-Container-Update-Override-Etag':
'update-etag'})
def test_override_etag_sysmeta_trumps_backend(self):
self._check_container_override_etag_preference(
{'X-Backend-Container-Update-Override-Etag': 'ignored-etag',
'X-Object-Sysmeta-Container-Update-Override-Etag':
'update-etag'}, {})
self._check_container_override_etag_preference(
{}, {'X-Backend-Container-Update-Override-Etag': 'ignored-etag',
'X-Object-Sysmeta-Container-Update-Override-Etag':
'update-etag'})
def test_override_etag_sysmeta_header_trumps_backend_footer(self):
headers = {'X-Object-Sysmeta-Container-Update-Override-Etag':
'update-etag'}
footers = {'X-Backend-Container-Update-Override-Etag':
'ignored-etag'}
self._check_container_override_etag_preference(headers, footers)
def test_override_etag_sysmeta_footer_trumps_backend_header(self):
headers = {'X-Backend-Container-Update-Override-Etag':
'ignored-etag'}
footers = {'X-Object-Sysmeta-Container-Update-Override-Etag':
'update-etag'}
self._check_container_override_etag_preference(headers, footers)
def test_PUT_etag_in_footer_mismatch(self):
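# a footer ETag that does not match the uploaded body is rejected with 422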
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
environ={'REQUEST_METHOD': 'PUT'})
footers = {"Etag": md5(b"green", usedforsecurity=False).hexdigest()}
footer_meta = json.dumps(footers).encode('ascii')
footer_meta_cksum = md5(
footer_meta, usedforsecurity=False).hexdigest().encode('ascii')
req.body = b"\r\n".join((
b"--boundary",
b"",
b"blue",
b"--boundary",
b"Content-MD5: " + footer_meta_cksum,
b"",
footer_meta,
b"--boundary--",
))
req.headers.pop("Content-Length", None)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 422)
def test_PUT_meta_in_footer(self):
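# user and system metadata supplied in the footer override the
# same-named values in the request headers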
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'X-Object-Meta-X': 'Z',
'X-Object-Sysmeta-X': 'Z',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
environ={'REQUEST_METHOD': 'PUT'})
footer_meta = json.dumps({
'X-Object-Meta-X': 'Y',
'X-Object-Sysmeta-X': 'Y',
}).encode('ascii')
footer_meta_cksum = md5(
footer_meta, usedforsecurity=False).hexdigest().encode('ascii')
req.body = b"\r\n".join((
b"--boundary",
b"",
b"stuff stuff stuff",
b"--boundary",
b"Content-MD5: " + footer_meta_cksum,
b"",
footer_meta,
b"--boundary--",
))
req.headers.pop("Content-Length", None)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp},
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.headers.get('X-Object-Meta-X'), 'Y')
self.assertEqual(resp.headers.get('X-Object-Sysmeta-X'), 'Y')
def test_PUT_missing_footer_checksum(self):
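# a footer MIME document without a Content-MD5 header is rejected with 400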
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
environ={'REQUEST_METHOD': 'PUT'})
footer_meta = json.dumps({
"Etag": md5(b"obj data", usedforsecurity=False).hexdigest()
}).encode('ascii')
req.body = b"\r\n".join((
b"--boundary",
b"",
b"obj data",
b"--boundary",
# no Content-MD5
b"",
footer_meta,
b"--boundary--",
))
req.headers.pop("Content-Length", None)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_bad_footer_checksum(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
environ={'REQUEST_METHOD': 'PUT'})
footer_meta = json.dumps({
"Etag": md5(b"obj data", usedforsecurity=False).hexdigest()
}).encode('ascii')
bad_footer_meta_cksum = \
md5(footer_meta + b"bad",
usedforsecurity=False).hexdigest().encode('ascii')
req.body = b"\r\n".join((
b"--boundary",
b"",
b"obj data",
b"--boundary",
b"Content-MD5: " + bad_footer_meta_cksum,
b"",
footer_meta,
b"--boundary--",
))
req.headers.pop("Content-Length", None)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 422)
def test_PUT_bad_footer_json(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
environ={'REQUEST_METHOD': 'PUT'})
footer_meta = b"{{{[[{{[{[[{[{[[{{{[{{{{[[{{[{["
footer_meta_cksum = md5(
footer_meta, usedforsecurity=False).hexdigest().encode('ascii')
req.body = b"\r\n".join((
b"--boundary",
b"",
b"obj data",
b"--boundary",
b"Content-MD5: " + footer_meta_cksum,
b"",
footer_meta,
b"--boundary--",
))
req.headers.pop("Content-Length", None)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_extra_mime_docs_ignored(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'},
environ={'REQUEST_METHOD': 'PUT'})
footer_meta = json.dumps({
'X-Object-Meta-Mint': 'pepper'
}).encode('ascii')
footer_meta_cksum = md5(
footer_meta, usedforsecurity=False).hexdigest().encode('ascii')
req.body = b"\r\n".join((
b"--boundary",
b"",
b"obj data",
b"--boundary",
b"Content-MD5: " + footer_meta_cksum,
b"",
footer_meta,
b"--boundary",
b"This-Document-Is-Useless: yes",
b"",
b"blah blah I take up space",
b"--boundary--"
))
req.headers.pop("Content-Length", None)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# swob made this into a StringIO for us
wsgi_input = req.environ['wsgi.input']
self.assertEqual(wsgi_input.tell(), len(wsgi_input.getvalue()))
def test_PUT_user_metadata_no_xattr(self):
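# if the filesystem does not support xattrs the PUT fails with 507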
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'ETag': 'b114ab7b90d9ccac4bd5d99cc7ebb568',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = 'VERIFY THREE'
def mock_get_and_setxattr(*args, **kargs):
error_num = errno.ENOTSUP if hasattr(errno, 'ENOTSUP') else \
errno.EOPNOTSUPP
raise IOError(error_num, 'Operation not supported')
with mock.patch('xattr.getxattr', mock_get_and_setxattr):
with mock.patch('xattr.setxattr', mock_get_and_setxattr):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 507)
def test_PUT_client_timeout(self):
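# simulate a ChunkReadTimeout while reading the request body; the server
# responds 408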
class FakeTimeout(BaseException):
def __enter__(self):
raise self
def __exit__(self, typ, value, tb):
pass
with mock.patch.object(object_server, 'ChunkReadTimeout', FakeTimeout):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Content-Length': '6'})
req.environ['wsgi.input'] = WsgiBytesIO(b'VERIFY')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 408)
def test_PUT_client_closed_connection(self):
class fake_input(object):
def read(self, *a, **kw):
# On client disconnect during a chunked transfer, eventlet
# may raise a ValueError (or ChunkReadError, following
# https://github.com/eventlet/eventlet/commit/c3ce3ee -- but
# that inherits from ValueError)
raise ValueError
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'Content-Length': '6'})
req.environ['wsgi.input'] = fake_input()
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 499)
def test_PUT_system_metadata(self):
# check that sysmeta is stored in diskfile
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Sysmeta-Two': 'Two',
'X-Object-Transient-Sysmeta-Foo': 'Bar'})
req.body = 'VERIFY SYSMETA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY SYSMETA')
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': timestamp,
'Content-Length': '14',
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'name': '/a/c/o',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Sysmeta-Two': 'Two',
'X-Object-Transient-Sysmeta-Foo': 'Bar'})
def test_PUT_succeeds_with_later_POST(self):
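# a PUT that is older than an existing .meta but newer than the existing
# .data succeeds; the POST's .meta is retained alongside the new .data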
t_put = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': t_put,
'Content-Length': 0,
'Content-Type': 'plain/text'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
t_put2 = next(self.ts).internal
t_post = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': t_post})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': t_put2,
'Content-Length': 0,
'Content-Type': 'plain/text'},
)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
obj_dir = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')))
ts_file = os.path.join(obj_dir, t_put2 + '.data')
self.assertTrue(os.path.isfile(ts_file))
meta_file = os.path.join(obj_dir, t_post + '.meta')
self.assertTrue(os.path.isfile(meta_file))
def test_POST_system_metadata(self):
# check that diskfile sysmeta is not changed by a POST
timestamp1 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp1,
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Sysmeta-Two': 'Two'})
req.body = 'VERIFY SYSMETA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
timestamp2 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp2,
'X-Object-Meta-1': 'Not One',
'X-Object-Sysmeta-1': 'Not One',
'X-Object-Sysmeta-Two': 'Not Two'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
# original .data file metadata should be unchanged
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
timestamp1 + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY SYSMETA')
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': timestamp1,
'Content-Length': '14',
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'name': '/a/c/o',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Sysmeta-Two': 'Two'})
# .meta file metadata should have only user meta items
metafile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
timestamp2 + '.meta')
self.assertTrue(os.path.isfile(metafile))
self.assertEqual(diskfile.read_metadata(metafile),
{'X-Timestamp': timestamp2,
'name': '/a/c/o',
'X-Object-Meta-1': 'Not One'})
def test_POST_then_fetch_content_type(self):
# check that content_type is updated by a POST
timestamp1 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp1,
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'X-Object-Meta-1': 'One'})
req.body = 'VERIFY SYSMETA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
timestamp2 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp2,
'X-Object-Meta-1': 'Not One',
'Content-Type': 'text/html'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
# original .data file metadata should be unchanged
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp1 + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY SYSMETA')
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': timestamp1,
'Content-Length': '14',
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'name': '/a/c/o',
'X-Object-Meta-1': 'One'})
# .meta file metadata should have updated content-type
metafile_name = encode_timestamps(Timestamp(timestamp2),
Timestamp(timestamp2),
explicit=True)
metafile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
metafile_name + '.meta')
self.assertTrue(os.path.isfile(metafile))
self.assertEqual(diskfile.read_metadata(metafile),
{'X-Timestamp': timestamp2,
'name': '/a/c/o',
'Content-Type': 'text/html',
'Content-Type-Timestamp': timestamp2,
'X-Object-Meta-1': 'Not One'})
def check_response(resp):
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 14)
self.assertEqual(resp.content_type, 'text/html')
self.assertEqual(resp.headers['content-type'], 'text/html')
self.assertEqual(
resp.headers['last-modified'],
strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp2)))))
self.assertEqual(resp.headers['etag'],
'"1000d172764c9dbc3a5798a67ec5bb76"')
self.assertEqual(resp.headers['x-object-meta-1'], 'Not One')
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
check_response(resp)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
check_response(resp)
def test_POST_transient_sysmeta(self):
# check that diskfile transient system meta is changed by a POST
timestamp1 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp1,
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Transient-Sysmeta-Foo': 'Bar'})
req.body = 'VERIFY SYSMETA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
timestamp2 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp2,
'X-Object-Meta-1': 'Not One',
'X-Object-Sysmeta-1': 'Not One',
'X-Object-Transient-Sysmeta-Foo': 'Not Bar'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
# original .data file metadata should be unchanged
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp1 + '.data')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(open(objfile).read(), 'VERIFY SYSMETA')
self.assertDictEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': timestamp1,
'Content-Length': '14',
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'name': '/a/c/o',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Transient-Sysmeta-Foo': 'Bar'})
# .meta file metadata should have only user meta items
metafile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp2 + '.meta')
self.assertTrue(os.path.isfile(metafile))
self.assertDictEqual(diskfile.read_metadata(metafile),
{'X-Timestamp': timestamp2,
'name': '/a/c/o',
'X-Object-Meta-1': 'Not One',
'X-Object-Transient-Sysmeta-Foo': 'Not Bar'})
def test_PUT_then_fetch_system_metadata(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Sysmeta-Two': 'Two',
'X-Object-Transient-Sysmeta-Foo': 'Bar'})
req.body = 'VERIFY SYSMETA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
def check_response(resp):
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 14)
self.assertEqual(resp.content_type, 'text/plain')
self.assertEqual(resp.headers['content-type'], 'text/plain')
self.assertEqual(
resp.headers['last-modified'],
strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))))
self.assertEqual(resp.headers['etag'],
'"1000d172764c9dbc3a5798a67ec5bb76"')
self.assertEqual(resp.headers['x-object-meta-1'], 'One')
self.assertEqual(resp.headers['x-object-sysmeta-1'], 'One')
self.assertEqual(resp.headers['x-object-sysmeta-two'], 'Two')
self.assertEqual(resp.headers['x-object-transient-sysmeta-foo'],
'Bar')
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
check_response(resp)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
check_response(resp)
def test_PUT_then_POST_then_fetch_system_metadata(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'text/plain',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'X-Object-Meta-0': 'deleted by post',
'X-Object-Sysmeta-0': 'Zero',
'X-Object-Transient-Sysmeta-0': 'deleted by post',
'X-Object-Meta-1': 'One',
'X-Object-Sysmeta-1': 'One',
'X-Object-Sysmeta-Two': 'Two',
'X-Object-Transient-Sysmeta-Foo': 'Bar'})
req.body = 'VERIFY SYSMETA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
timestamp2 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp2,
'X-Object-Meta-1': 'Not One',
'X-Object-Sysmeta-1': 'Not One',
'X-Object-Sysmeta-Two': 'Not Two',
'X-Object-Transient-Sysmeta-Foo': 'Not Bar'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
def check_response(resp):
# user meta should be updated but not sysmeta
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 14)
self.assertEqual(resp.content_type, 'text/plain')
self.assertEqual(resp.headers['content-type'], 'text/plain')
self.assertEqual(
resp.headers['last-modified'],
strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp2)))))
self.assertEqual(resp.headers['etag'],
'"1000d172764c9dbc3a5798a67ec5bb76"')
self.assertEqual(resp.headers['x-object-meta-1'], 'Not One')
self.assertEqual(resp.headers['x-object-sysmeta-0'], 'Zero')
self.assertEqual(resp.headers['x-object-sysmeta-1'], 'One')
self.assertEqual(resp.headers['x-object-sysmeta-two'], 'Two')
self.assertEqual(resp.headers['x-object-transient-sysmeta-foo'],
'Not Bar')
self.assertNotIn('x-object-meta-0', resp.headers)
self.assertNotIn('x-object-transient-sysmeta-0', resp.headers)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
check_response(resp)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
check_response(resp)
def test_PUT_with_replication_headers(self):
# check that otherwise disallowed headers are accepted when specified
# by X-Backend-Replication-Headers
# first PUT object
timestamp1 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp1,
'Content-Type': 'text/plain',
'Content-Length': '14',
'Etag': '1000d172764c9dbc3a5798a67ec5bb76',
'Custom-Header': 'custom1',
'X-Object-Meta-1': 'meta1',
'X-Static-Large-Object': 'False'})
req.body = 'VERIFY SYSMETA'
# restrict set of allowed headers on this server
with mock.patch.object(self.object_controller, 'allowed_headers',
['Custom-Header']):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp1 + '.data')
# X-Static-Large-Object is disallowed.
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': timestamp1,
'Content-Type': 'text/plain',
'Content-Length': '14',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'name': '/a/c/o',
'Custom-Header': 'custom1',
'X-Object-Meta-1': 'meta1'})
# PUT object again with X-Backend-Replication-Headers
timestamp2 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp2,
'Content-Type': 'text/plain',
'Content-Length': '14',
'Etag': '1000d172764c9dbc3a5798a67ec5bb76',
'Custom-Header': 'custom1',
'X-Object-Meta-1': 'meta1',
'X-Static-Large-Object': 'False',
'X-Backend-Replication-Headers':
'X-Static-Large-Object'})
req.body = 'VERIFY SYSMETA'
with mock.patch.object(self.object_controller, 'allowed_headers',
['Custom-Header']):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp2 + '.data')
# X-Static-Large-Object should be copied since it is now allowed by
# replication headers.
self.assertEqual(diskfile.read_metadata(objfile),
{'X-Timestamp': timestamp2,
'Content-Type': 'text/plain',
'Content-Length': '14',
'ETag': '1000d172764c9dbc3a5798a67ec5bb76',
'name': '/a/c/o',
'Custom-Header': 'custom1',
'X-Object-Meta-1': 'meta1',
'X-Static-Large-Object': 'False'})
def test_PUT_container_connection(self):
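# the PUT returns 201 whether the async container update succeeds (201),
# fails (500), or raises an exception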
def mock_http_connect(response, with_exc=False):
class FakeConn(object):
def __init__(self, status, with_exc):
self.status = status
self.reason = 'Fake'
self.host = '1.2.3.4'
self.port = '1234'
self.with_exc = with_exc
def getresponse(self):
if self.with_exc:
raise Exception('test')
return self
def read(self, amt=None):
return b''
return lambda *args, **kwargs: FakeConn(response, with_exc)
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'X-Container-Host': '1.2.3.4:0',
'X-Container-Partition': '3',
'X-Container-Device': 'sda1',
'X-Container-Timestamp': '1',
'Content-Type': 'application/new1',
'Content-Length': '0'})
with mock.patch.object(
object_server, 'http_connect', mock_http_connect(201)):
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'X-Container-Host': '1.2.3.4:0',
'X-Container-Partition': '3',
'X-Container-Device': 'sda1',
'X-Container-Timestamp': '1',
'Content-Type': 'application/new1',
'Content-Length': '0'})
with mock.patch.object(
object_server, 'http_connect', mock_http_connect(500)):
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'X-Container-Host': '1.2.3.4:0',
'X-Container-Partition': '3',
'X-Container-Device': 'sda1',
'X-Container-Timestamp': '1',
'Content-Type': 'application/new1',
'Content-Length': '0'})
with mock.patch.object(
object_server, 'http_connect',
mock_http_connect(500, with_exc=True)):
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
def test_EC_PUT_GET_data(self):
for policy in self.ec_policies:
ts = next(self.ts)
raw_data = (b'VERIFY' * policy.ec_segment_size)[:-432]
frag_archives = encode_frag_archive_bodies(policy, raw_data)
frag_index = random.randint(0, len(frag_archives) - 1)
# put EC frag archive
req = Request.blank('/sda1/p/a/c/o', method='PUT', headers={
'X-Timestamp': ts.internal,
'Content-Type': 'application/verify',
'Content-Length': len(frag_archives[frag_index]),
'X-Object-Sysmeta-Ec-Frag-Index': frag_index,
'X-Backend-Storage-Policy-Index': int(policy),
})
req.body = frag_archives[frag_index]
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# get EC frag archive
req = Request.blank('/sda1/p/a/c/o', headers={
'X-Backend-Storage-Policy-Index': int(policy),
})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, frag_archives[frag_index])
# check the diskfile is durable
df_mgr = diskfile.ECDiskFileManager(self.conf,
self.object_controller.logger)
df = df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o', policy,
frag_prefs=[])
with df.open():
self.assertEqual(ts, df.data_timestamp)
self.assertEqual(df.data_timestamp, df.durable_timestamp)
def test_EC_PUT_GET_data_no_commit(self):
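# with X-Backend-No-Commit the fragment is written but never made durable:
# a plain GET 404s until an empty fragment-preferences list is supplied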
for policy in self.ec_policies:
ts = next(self.ts)
raw_data = (b'VERIFY' * policy.ec_segment_size)[:-432]
frag_archives = encode_frag_archive_bodies(policy, raw_data)
frag_index = random.randint(0, len(frag_archives) - 1)
# put EC frag archive
req = Request.blank('/sda1/p/a/c/o', method='PUT', headers={
'X-Timestamp': ts.internal,
'Content-Type': 'application/verify',
'Content-Length': len(frag_archives[frag_index]),
'X-Backend-No-Commit': 'true',
'X-Object-Sysmeta-Ec-Frag-Index': frag_index,
'X-Backend-Storage-Policy-Index': int(policy),
})
req.body = frag_archives[frag_index]
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# get EC frag archive will 404 - nothing durable...
req = Request.blank('/sda1/p/a/c/o', headers={
'X-Backend-Storage-Policy-Index': int(policy),
})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
# ...unless we explicitly request *any* fragment...
req = Request.blank('/sda1/p/a/c/o', headers={
'X-Backend-Storage-Policy-Index': int(policy),
'X-Backend-Fragment-Preferences': '[]',
})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, frag_archives[frag_index])
# check the diskfile is not durable
df_mgr = diskfile.ECDiskFileManager(self.conf,
self.object_controller.logger)
df = df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o', policy,
frag_prefs=[])
with df.open():
self.assertEqual(ts, df.data_timestamp)
self.assertIsNone(df.durable_timestamp)
def test_EC_GET_quarantine_invalid_frag_archive(self):
policy = random.choice(self.ec_policies)
raw_data = (b'VERIFY' * policy.ec_segment_size)[:-432]
frag_archives = encode_frag_archive_bodies(policy, raw_data)
frag_index = random.randint(0, len(frag_archives) - 1)
content_length = len(frag_archives[frag_index])
# put EC frag archive
req = Request.blank('/sda1/p/a/c/o', method='PUT', headers={
'X-Timestamp': next(self.ts).internal,
'Content-Type': 'application/verify',
'Content-Length': content_length,
'X-Object-Sysmeta-Ec-Frag-Index': frag_index,
'X-Backend-Storage-Policy-Index': int(policy),
})
corrupt = b'garbage' + frag_archives[frag_index]
req.body = corrupt[:content_length]
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# get EC frag archive
req = Request.blank('/sda1/p/a/c/o', headers={
'X-Backend-Storage-Policy-Index': int(policy),
})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
with self.assertRaises(DiskFileQuarantined) as ctx:
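# reading the body validates the fragment; the corrupt archive is
# quarantined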
resp.body
self.assertIn("Invalid EC metadata", str(ctx.exception))
# nothing is logged on *our* loggers
errors = self.object_controller.logger.get_lines_for_level('error')
self.assertEqual(errors, [])
# get EC frag archive - it's gone
req = Request.blank('/sda1/p/a/c/o', headers={
'X-Backend-Storage-Policy-Index': int(policy),
})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
def test_PUT_ssync_multi_frag(self):
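# at a given timestamp a second frag archive with a different frag index
# normally conflicts (409); supplying X-Backend-Ssync-Frag-Index for a
# primary node lets both frag archives be stored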
timestamp = utils.Timestamp.now().internal
def put_with_index(expected_rsp, frag_index, node_index=None):
data_file_tail = '#%d#d.data' % frag_index
headers = {'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'X-Backend-Ssync-Frag-Index': node_index,
'X-Object-Sysmeta-Ec-Frag-Index': frag_index,
'X-Backend-Storage-Policy-Index': int(policy)}
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
req.body = b'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(
resp.status_int, expected_rsp,
'got %s != %s for frag_index=%s node_index=%s' % (
resp.status_int, expected_rsp,
frag_index, node_index))
if expected_rsp == 409:
return
obj_dir = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(int(policy)),
'p', hash_path('a', 'c', 'o')))
data_file = os.path.join(obj_dir, timestamp) + data_file_tail
self.assertTrue(os.path.isfile(data_file),
'Expected file %r not found in %r for policy %r'
% (data_file, os.listdir(obj_dir), int(policy)))
for policy in POLICIES:
if policy.policy_type == EC_POLICY:
# upload with a ec-frag-index
put_with_index(201, 3)
# same timestamp will conflict a different ec-frag-index
put_with_index(409, 2)
# but with the ssync-frag-index (primary node) it will just
# save both!
put_with_index(201, 2, 2)
# but even with the ssync-frag-index we can still get a
# timestamp collision if the file already exists
put_with_index(409, 3, 3)
# FWIW, ssync will never send inconsistent indexes - but if
# something else did, from the object server perspective ...
# ... the ssync-frag-index is canonical on the
# read/pre-existence check
put_with_index(409, 7, 2)
# ... but the ec-frag-index is canonical when it comes to on
# disk file
put_with_index(201, 7, 6)
def test_PUT_commits_data(self):
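# check that PUT leaves a committed .data file on disk; for EC policies
# the file name carries the frag index and the durable marker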
for policy in POLICIES:
timestamp = utils.Timestamp(int(time())).internal
data_file_tail = '.data'
headers = {'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'X-Backend-Storage-Policy-Index': int(policy)}
if policy.policy_type == EC_POLICY:
# commit renames data file
headers['X-Object-Sysmeta-Ec-Frag-Index'] = '2'
data_file_tail = '#2#d.data'
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
req.body = b'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
obj_dir = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(int(policy)),
'p', hash_path('a', 'c', 'o')))
data_file = os.path.join(obj_dir, timestamp) + data_file_tail
self.assertTrue(os.path.isfile(data_file),
'Expected file %r not found in %r for policy %r'
% (data_file, os.listdir(obj_dir), int(policy)))
rmtree(obj_dir)
def test_PUT_next_part_power(self):
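# with X-Backend-Next-Part-Power the object lands in the old partition
# and is also linked into the new partition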
hash_path_ = hash_path('a', 'c', 'o')
part_power = 10
old_part = utils.get_partition_for_hash(hash_path_, part_power)
new_part = utils.get_partition_for_hash(hash_path_, part_power + 1)
policy = POLICIES.default
timestamp = utils.Timestamp(int(time())).internal
headers = {'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Backend-Next-Part-Power': part_power + 1}
req = Request.blank(
'/sda1/%s/a/c/o' % old_part, method='PUT',
headers=headers, body=b'VERIFY')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
def check_file(part):
data_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(int(policy)),
part, hash_path_), timestamp + '.data')
self.assertTrue(os.path.isfile(data_file))
check_file(old_part)
check_file(new_part)
def test_PUT_next_part_power_eexist(self):
hash_path_ = hash_path('a', 'c', 'o')
part_power = 10
old_part = utils.get_partition_for_hash(hash_path_, part_power)
new_part = utils.get_partition_for_hash(hash_path_, part_power + 1)
policy = POLICIES.default
timestamp = utils.Timestamp(int(time())).internal
# There's no substitute for the real thing ;-)
tpool.execute = self._orig_tpool_exc
# This is a little disingenuous, but it's easier than reproducing
# the actual race that could lead to this EEXIST
headers = {'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Trans-Id': 'txn1'}
req = Request.blank(
'/sda1/%s/a/c/o' % new_part, method='PUT',
headers=headers, body=b'VERIFY')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# The write should succeed, but the relink will fail
headers = {'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Backend-Next-Part-Power': part_power + 1,
'X-Trans-Id': 'txn2'}
req = Request.blank(
'/sda1/%s/a/c/o' % old_part, method='PUT',
headers=headers, body=b'VERIFY')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
def check_file(part):
data_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(int(policy)),
part, hash_path_), timestamp + '.data')
self.assertTrue(os.path.isfile(data_file))
check_file(old_part)
check_file(new_part)
error_lines = self.logger.get_lines_for_level('error')
self.assertIn('[Errno 17] File exists', error_lines[0])
self.assertEqual([], error_lines[1:])
log_extras = self.logger.log_dict['error'][0][1]['extra']
self.assertEqual('txn2', log_extras.get('txn_id'))
def test_PUT_next_part_power_races_around_makedirs_eexist(self):
# simulate two 'concurrent' requests racing to create the new object dir
# in the
# new partition and check that relinking tolerates the dir already
# existing when they attempt to create it
hash_path_ = hash_path('a', 'c', 'o')
part_power = 10
old_part = utils.get_partition_for_hash(hash_path_, part_power)
new_part = utils.get_partition_for_hash(hash_path_, part_power + 1)
policy = POLICIES.default
def make_request(timestamp):
headers = {'X-Timestamp': timestamp.internal,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Backend-Next-Part-Power': part_power + 1}
req = Request.blank(
'/sda1/%s/a/c/o' % old_part, method='PUT',
headers=headers, body=b'VERIFY')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
def data_file(part, timestamp):
return os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(int(policy)),
part, hash_path_),
timestamp.internal + '.data')
ts_1 = next(self.ts)
ts_2 = next(self.ts)
calls = []
orig_makedirs = os.makedirs
def mock_makedirs(path, *args, **kwargs):
# let another request catch up just as the first is about to create
# the next part power object dir, then pretend the first request
# process actually makes the dir
if path == os.path.dirname(data_file(new_part, ts_1)):
calls.append(path)
if len(calls) == 1:
# pretend 'yield' to other request process
make_request(ts_2)
if len(calls) == 2:
# pretend 'yield' back to first request process for
# its call to makedirs
orig_makedirs(calls[0])
return orig_makedirs(path, *args, **kwargs)
with mock.patch('swift.obj.diskfile.os.makedirs', mock_makedirs):
make_request(ts_1)
self.assertEqual(
[os.path.dirname(data_file(new_part, ts_1)),
os.path.dirname(data_file(new_part, ts_1))], calls)
self.assertTrue(os.path.isfile(data_file(old_part, ts_2)))
self.assertTrue(os.path.isfile(data_file(new_part, ts_2)))
self.assertFalse(os.path.isfile(data_file(new_part, ts_1)))
self.assertFalse(os.path.isfile(data_file(old_part, ts_1)))
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual([], error_lines)
def test_PUT_next_part_power_races_around_makedirs_enoent(self):
hash_path_ = hash_path('a', 'c', 'o')
part_power = 10
old_part = utils.get_partition_for_hash(hash_path_, part_power)
new_part = utils.get_partition_for_hash(hash_path_, part_power + 1)
policy = POLICIES.default
def make_request(timestamp):
headers = {'X-Timestamp': timestamp.internal,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Backend-Next-Part-Power': part_power + 1}
req = Request.blank(
'/sda1/%s/a/c/o' % old_part, method='PUT',
headers=headers, body=b'VERIFY')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
def data_file(part, timestamp):
return os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(int(policy)),
part, hash_path_),
timestamp.internal + '.data')
ts_1 = next(self.ts)
ts_2 = next(self.ts)
calls = []
orig_makedirs = os.makedirs
def mock_makedirs(path, *args, **kwargs):
# let another request race ahead just as the first is about to
# create the next part power object dir
if path == os.path.dirname(data_file(new_part, ts_1)):
calls.append(path)
if len(calls) == 1:
# pretend 'yield' to other request process
make_request(ts_2)
return orig_makedirs(path, *args, **kwargs)
with mock.patch('swift.obj.diskfile.os.makedirs', mock_makedirs):
make_request(ts_1)
self.assertEqual(
[os.path.dirname(data_file(new_part, ts_1)),
os.path.dirname(data_file(new_part, ts_1))], calls)
self.assertTrue(os.path.isfile(data_file(old_part, ts_2)))
self.assertTrue(os.path.isfile(data_file(new_part, ts_2)))
self.assertFalse(os.path.isfile(data_file(new_part, ts_1)))
self.assertFalse(os.path.isfile(data_file(old_part, ts_1)))
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual([], error_lines)
def test_HEAD(self):
# Test swift.obj.server.ObjectController.HEAD
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertFalse('X-Backend-Timestamp' in resp.headers)
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = b'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 6)
self.assertEqual(resp.content_type, 'application/x-test')
self.assertEqual(resp.headers['content-type'], 'application/x-test')
self.assertEqual(
resp.headers['last-modified'],
strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))))
self.assertEqual(resp.headers['etag'],
'"0b4c12d7e0a73840c1c4f148fda3b037"')
self.assertEqual(resp.headers['x-object-meta-1'], 'One')
self.assertEqual(resp.headers['x-object-meta-two'], 'Two')
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
os.unlink(objfile)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
sleep(.00001)
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': timestamp,
'Content-Type': 'application/octet-stream',
'Content-length': '6'})
req.body = b'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
sleep(.00001)
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Backend-Timestamp'],
utils.Timestamp(timestamp).internal)
def test_HEAD_quarantine_zbyte(self):
# Test swift.obj.server.ObjectController.HEAD with a zero-byte .data file
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = b'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
policy=POLICIES.legacy)
disk_file.open()
file_name = os.path.basename(disk_file._data_file)
with open(disk_file._data_file) as fp:
metadata = diskfile.read_metadata(fp)
os.unlink(disk_file._data_file)
with open(disk_file._data_file, 'w') as fp:
diskfile.write_metadata(fp, metadata)
file_name = os.path.basename(disk_file._data_file)
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
quar_dir = os.path.join(
self.testdir, 'sda1', 'quarantined', 'objects',
os.path.basename(os.path.dirname(disk_file._data_file)))
self.assertEqual(os.listdir(quar_dir)[0], file_name)
def test_OPTIONS(self):
conf = {'devices': self.testdir, 'mount_check': 'false'}
server_handler = object_server.ObjectController(
conf, logger=debug_logger())
req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = server_handler.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD REPLICATE \
SSYNC'.split():
self.assertTrue(
verb in resp.headers['Allow'].split(', '))
self.assertEqual(len(resp.headers['Allow'].split(', ')), 8)
self.assertEqual(resp.headers['Server'],
(server_handler.server_type + '/' + swift_version))
def test_insufficient_storage_mount_check_true(self):
conf = {'devices': self.testdir, 'mount_check': 'true'}
object_controller = object_server.ObjectController(conf)
for policy in POLICIES:
mgr = object_controller._diskfile_router[policy]
self.assertTrue(mgr.mount_check)
for method in object_controller.allowed_methods:
if method in ('OPTIONS', 'SSYNC'):
continue
path = '/sda1/p/'
if method == 'REPLICATE':
path += 'suff'
else:
path += 'a/c/o'
req = Request.blank(path, method=method,
headers={'x-timestamp': '1',
'content-type': 'app/test',
'content-length': 0})
with mock_check_drive() as mocks:
try:
resp = req.get_response(object_controller)
self.assertEqual(resp.status_int, 507)
mocks['ismount'].return_value = True
resp = req.get_response(object_controller)
self.assertNotEqual(resp.status_int, 507)
# feel free to rip out this last assertion...
expected = 2 if method in ('PUT', 'REPLICATE') else 4
self.assertEqual(resp.status_int // 100, expected)
except AssertionError as e:
self.fail('%s for %s' % (e, method))
def test_insufficient_storage_mount_check_false(self):
conf = {'devices': self.testdir, 'mount_check': 'false'}
object_controller = object_server.ObjectController(conf)
for policy in POLICIES:
mgr = object_controller._diskfile_router[policy]
self.assertFalse(mgr.mount_check)
for method in object_controller.allowed_methods:
if method in ('OPTIONS', 'SSYNC'):
continue
path = '/sda1/p/'
if method == 'REPLICATE':
path += 'suff'
else:
path += 'a/c/o'
req = Request.blank(path, method=method,
headers={'x-timestamp': '1',
'content-type': 'app/test',
'content-length': 0})
with mock_check_drive() as mocks:
try:
resp = req.get_response(object_controller)
self.assertEqual(resp.status_int, 507)
mocks['isdir'].return_value = True
resp = req.get_response(object_controller)
self.assertNotEqual(resp.status_int, 507)
# feel free to rip out this last assertion...
expected = 2 if method in ('PUT', 'REPLICATE') else 4
self.assertEqual(resp.status_int // 100, expected)
except AssertionError as e:
self.fail('%s for %s' % (e, method))
def test_GET(self):
# Test swift.obj.server.ObjectController.GET
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertFalse('X-Backend-Timestamp' in resp.headers)
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = b'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, b'VERIFY')
self.assertEqual(resp.content_length, 6)
self.assertEqual(resp.content_type, 'application/x-test')
self.assertEqual(resp.headers['content-length'], '6')
self.assertEqual(resp.headers['content-type'], 'application/x-test')
self.assertEqual(
resp.headers['last-modified'],
strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))))
self.assertEqual(resp.headers['etag'],
'"0b4c12d7e0a73840c1c4f148fda3b037"')
self.assertEqual(resp.headers['x-object-meta-1'], 'One')
self.assertEqual(resp.headers['x-object-meta-two'], 'Two')
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
req.range = 'bytes=1-3'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 206)
self.assertEqual(resp.body, b'ERI')
self.assertEqual(resp.headers['content-length'], '3')
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
req.range = 'bytes=1-'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 206)
self.assertEqual(resp.body, b'ERIFY')
self.assertEqual(resp.headers['content-length'], '5')
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
req.range = 'bytes=-2'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 206)
self.assertEqual(resp.body, b'FY')
self.assertEqual(resp.headers['content-length'], '2')
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
req.range = 'bytes=100-'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 416)
self.assertIn(b'Not Satisfiable', resp.body)
self.assertEqual('bytes */6', resp.headers['content-range'])
# Proxy (SLO in particular) can say that if some metadata's present,
# it wants the whole thing
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
req.range = 'bytes=1-3'
req.headers['X-Backend-Ignore-Range-If-Metadata-Present'] = \
'X-Object-Meta-1'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, b'VERIFY')
self.assertEqual(resp.headers['content-length'], '6')
self.assertNotIn('content-range', resp.headers)
# If it's not present, Range is still respected
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
req.range = 'bytes=1-3'
req.headers['X-Backend-Ignore-Range-If-Metadata-Present'] = \
'X-Object-Meta-5'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 206)
self.assertEqual(resp.body, b'ERI')
self.assertEqual(resp.headers['content-length'], '3')
self.assertEqual('bytes 1-3/6', resp.headers['content-range'])
# Works like "any", not "all"; also works where we would've 416ed
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
req.range = 'bytes=100-'
req.headers['X-Backend-Ignore-Range-If-Metadata-Present'] = \
'X-Object-Meta-1, X-Object-Meta-5'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, b'VERIFY')
self.assertEqual(resp.headers['content-length'], '6')
self.assertNotIn('content-range', resp.headers)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.data')
os.unlink(objfile)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
sleep(.00001)
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': timestamp,
'Content-Type': 'application/octet-stream',
'Content-Length': '6'})
req.body = b'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
sleep(.00001)
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Backend-Timestamp'],
utils.Timestamp(timestamp).internal)
def test_GET_range_zero_byte_object(self):
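# a suffix range larger than a zero-byte object is ignored; the empty
# object is returned with 200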
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/zero-byte',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = b''
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/zero-byte',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=-10'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
def test_GET_range_not_satisfiable(self):
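# a multi-range request that partly extends past EOF is trimmed to the
# satisfiable part; a range entirely past EOF gets 416 with
# Content-Range 'bytes */<size>'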
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/zero-byte',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = b'7 bytes'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/zero-byte',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=1-20, 30-40'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 206)
self.assertEqual('bytes 1-6/7', resp.headers.get('Content-Range'))
self.assertEqual(b' bytes', resp.body)
req = Request.blank('/sda1/p/a/c/zero-byte',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=10-20'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 416)
self.assertEqual('bytes */7', resp.headers.get('Content-Range'))
exp_resp_body = b''.join(
HTTPRequestedRangeNotSatisfiable()({}, lambda *args: None))
self.assertEqual(str(len(exp_resp_body)),
resp.headers.get('Content-Length'))
self.assertEqual(
'"%s"' % md5(b'7 bytes', usedforsecurity=False).hexdigest(),
resp.headers.get('Etag'))
self.assertEqual(exp_resp_body, resp.body)
def test_GET_if_match(self):
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = b'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
etag = resp.etag
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': '"%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Match': '"11111111111111111111111111111111"'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
self.assertIn(
'"HEAD /sda1/p/a/c/o" 412 - ',
self.object_controller.logger.get_lines_for_level('info')[-1])
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': '"11111111111111111111111111111111"'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
self.assertIn(
'"GET /sda1/p/a/c/o" 412 - ',
self.object_controller.logger.get_lines_for_level('info')[-1])
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={
'If-Match': '"11111111111111111111111111111111", "%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={
'If-Match':
'"11111111111111111111111111111111", '
'"22222222222222222222222222222222"'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
def test_GET_if_match_etag_is_at(self):
headers = {
'X-Timestamp': utils.Timestamp.now().internal,
'Content-Type': 'application/octet-stream',
'X-Object-Meta-Xtag': 'madeup',
'X-Object-Sysmeta-Xtag': 'alternate madeup',
}
req = Request.blank('/sda1/p/a/c/o', method='PUT',
headers=headers)
req.body = b'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
real_etag = resp.etag
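        # X-Backend-Etag-Is-At names one or more metadata headers, in order
        # of preference; the value of the first one present on the object is
        # used in place of the real ETag when evaluating conditional requests.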
# match x-backend-etag-is-at
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': 'madeup',
'X-Backend-Etag-Is-At': 'X-Object-Meta-Xtag'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# match x-backend-etag-is-at, using first in list of alternates
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': 'madeup',
'X-Backend-Etag-Is-At':
'X-Object-Meta-Xtag,X-Object-Sysmeta-Z'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# match x-backend-etag-is-at, using second in list of alternates
alts = 'X-Object-Sysmeta-Y,X-Object-Meta-Xtag,X-Object-Sysmeta-Z'
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': 'madeup',
'X-Backend-Etag-Is-At': alts})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# match x-backend-etag-is-at, choosing first of multiple alternates
alts = 'X-Object-Sysmeta-Y,X-Object-Meta-Xtag,X-Object-Sysmeta-Xtag'
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': 'madeup',
'X-Backend-Etag-Is-At': alts})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# match x-backend-etag-is-at, choosing first of multiple alternates
# (switches order of second two alternates from previous assertion)
alts = 'X-Object-Sysmeta-Y,X-Object-Sysmeta-Xtag,X-Object-Meta-Xtag'
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': 'alternate madeup',
'X-Backend-Etag-Is-At': alts})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# no match x-backend-etag-is-at
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': real_etag,
'X-Backend-Etag-Is-At': 'X-Object-Meta-Xtag'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
# etag-is-at metadata doesn't exist, default to real etag
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': real_etag,
'X-Backend-Etag-Is-At': 'X-Object-Meta-Missing'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# sanity no-match with no etag-is-at
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': 'madeup'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
# sanity match with no etag-is-at
req = Request.blank('/sda1/p/a/c/o', headers={
'If-Match': real_etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# sanity with no if-match
req = Request.blank('/sda1/p/a/c/o', headers={
'X-Backend-Etag-Is-At': 'X-Object-Meta-Xtag'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
def test_HEAD_if_match(self):
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = b'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
etag = resp.etag
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Match': '"%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Match': '"11111111111111111111111111111111"'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'},
headers={
'If-Match': '"11111111111111111111111111111111", "%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'},
headers={
'If-Match':
'"11111111111111111111111111111111", '
'"22222222222222222222222222222222"'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
def test_GET_if_none_match(self):
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': normalize_timestamp(time()),
'X-Object-Meta-Soup': 'gazpacho',
'Content-Type': 'application/fizzbuzz',
'Content-Length': '4'})
req.body = b'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
etag = resp.etag
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
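        # If-None-Match: * matches any existing object, so the GET is
        # answered with 304 Not Modified while still exposing the object's
        # Etag, Content-Type and user metadata.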
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
self.assertEqual(resp.etag, etag)
self.assertEqual(resp.headers['Content-Type'], 'application/fizzbuzz')
self.assertEqual(resp.headers['X-Object-Meta-Soup'], 'gazpacho')
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': '"%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
self.assertEqual(resp.etag, etag)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': '"11111111111111111111111111111111"'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match':
'"11111111111111111111111111111111", '
'"%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
self.assertEqual(resp.etag, etag)
def test_HEAD_if_none_match(self):
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = b'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
etag = resp.etag
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-None-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
self.assertEqual(resp.etag, etag)
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-None-Match': '*'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-None-Match': '"%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
self.assertEqual(resp.etag, etag)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-None-Match': '"11111111111111111111111111111111"'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.etag, etag)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-None-Match':
'"11111111111111111111111111111111", '
'"%s"' % etag})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
self.assertEqual(resp.etag, etag)
def test_GET_if_modified_since(self):
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': timestamp,
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = b'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(float(timestamp) + 1))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
since = \
strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) - 1))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = \
strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) + 1))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
since = resp.headers['Last-Modified']
self.assertEqual(since, strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
timestamp = normalize_timestamp(int(time()))
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': timestamp,
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = b'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(float(timestamp)))
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
def test_HEAD_if_modified_since(self):
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': timestamp,
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = b'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(float(timestamp) + 1))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
since = \
strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) - 1))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = \
strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) + 1))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
since = resp.headers['Last-Modified']
self.assertEqual(since, strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Modified-Since': since})
        resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
timestamp = normalize_timestamp(int(time()))
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': timestamp,
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = b'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(float(timestamp)))
req = Request.blank('/sda1/p/a/c/o2',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Modified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 304)
def test_GET_if_unmodified_since(self):
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': timestamp,
'X-Object-Meta-Burr': 'ito',
'Content-Type': 'application/cat-picture',
'Content-Length': '4'})
req.body = b'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(float(timestamp) + 1))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Unmodified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = \
strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) - 9))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Unmodified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
self.assertEqual(resp.headers['Content-Type'],
'application/cat-picture')
self.assertEqual(resp.headers['X-Object-Meta-Burr'], 'ito')
since = \
strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(float(timestamp) + 9))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Unmodified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
since = resp.headers['Last-Modified']
self.assertEqual(since, strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))))
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Unmodified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
def test_HEAD_if_unmodified_since(self):
timestamp = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/octet-stream',
'Content-Length': '4'})
req.body = b'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)) + 1))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Unmodified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp))))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Unmodified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
since = strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)) - 1))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'If-Unmodified-Since': since})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
def assertECBodyEqual(self, resp, expected):
        # the policy index isn't echoed in the response headers, so pull it
        # from the headers of the original request instead
policy_index = int(
resp.request.headers['X-Backend-Storage-Policy-Index'])
policy = POLICIES[policy_index]
frags = encode_frag_archive_bodies(policy, expected)
frag_index = int(resp.headers['X-Object-Sysmeta-Ec-Frag-Index'])
self.assertEqual(resp.body, frags[frag_index])
def _create_ondisk_fragments(self, policy):
# Create some on disk files...
# PUT at ts_0
ts_0 = next(self.ts)
body = b'OLDER'
headers = {'X-Timestamp': ts_0.internal,
'Content-Length': '5',
'Content-Type': 'application/octet-stream',
'X-Backend-Storage-Policy-Index': int(policy)}
if policy.policy_type == EC_POLICY:
body = encode_frag_archive_bodies(policy, body)[0]
headers.update({
'X-Object-Sysmeta-Ec-Frag-Index': '0',
'Content-Length': len(body),
})
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
req.body = body
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# POST at ts_1
ts_1 = next(self.ts)
headers = {'X-Timestamp': ts_1.internal,
'X-Backend-Storage-Policy-Index': int(policy)}
headers['X-Object-Meta-Test'] = 'abc'
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers=headers)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
# PUT again at ts_2 but without making the data file durable
ts_2 = next(self.ts)
body = b'NEWER'
headers = {'X-Timestamp': ts_2.internal,
'Content-Length': '5',
'Content-Type': 'application/octet-stream',
'X-Backend-Storage-Policy-Index': int(policy)}
if policy.policy_type == EC_POLICY:
body = encode_frag_archive_bodies(policy, body)[2]
headers.update({
'X-Object-Sysmeta-Ec-Frag-Index': '2',
'Content-Length': len(body),
})
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
req.body = body
# patch the commit method to do nothing so EC object is non-durable
with mock.patch('swift.obj.diskfile.ECDiskFileWriter.commit'):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
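        # On disk now: a durable .data at ts_0 (frag index 0 for EC
        # policies), a .meta at ts_1 from the POST, and a second .data at
        # ts_2 that is left non-durable for EC policies.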
return ts_0, ts_1, ts_2
def test_GET_HEAD_with_fragment_preferences(self):
for policy in POLICIES:
ts_0, ts_1, ts_2 = self._create_ondisk_fragments(policy)
backend_frags = {ts_0.internal: [0], ts_2.internal: [2]}
def _assert_frag_0_at_ts_0(resp):
expect = {
'X-Timestamp': ts_1.normal,
'X-Backend-Timestamp': ts_1.internal,
'X-Backend-Data-Timestamp': ts_0.internal,
'X-Backend-Durable-Timestamp': ts_0.internal,
'X-Object-Sysmeta-Ec-Frag-Index': '0',
'X-Object-Meta-Test': 'abc'}
self._assertDictContainsSubset(expect, resp.headers)
self.assertEqual(backend_frags, json.loads(
resp.headers['X-Backend-Fragments']))
def _assert_repl_data_at_ts_2():
self.assertIn(resp.status_int, (200, 202))
expect = {
'X-Timestamp': ts_2.normal,
'X-Backend-Timestamp': ts_2.internal,
'X-Backend-Data-Timestamp': ts_2.internal,
'X-Backend-Durable-Timestamp': ts_2.internal}
self._assertDictContainsSubset(expect, resp.headers)
self.assertNotIn('X-Object-Meta-Test', resp.headers)
# Sanity check: Request with no preferences should default to the
# durable frag
headers = {'X-Backend-Storage-Policy-Index': int(policy)}
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_0_at_ts_0(resp)
self.assertECBodyEqual(resp, b'OLDER')
else:
_assert_repl_data_at_ts_2()
self.assertEqual(resp.body, b'NEWER')
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
if policy.policy_type == EC_POLICY:
_assert_frag_0_at_ts_0(resp)
else:
_assert_repl_data_at_ts_2()
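            # X-Backend-Fragment-Preferences is a JSON list of
            # {'timestamp': ..., 'exclude': [frag indexes]} entries in order
            # of preference; sending it also makes non-durable frags eligible.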
# Request with preferences can select the older frag
prefs = json.dumps(
[{'timestamp': ts_0.internal, 'exclude': [1, 3]}])
headers = {'X-Backend-Storage-Policy-Index': int(policy),
'X-Backend-Fragment-Preferences': prefs}
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_0_at_ts_0(resp)
self.assertECBodyEqual(resp, b'OLDER')
else:
_assert_repl_data_at_ts_2()
self.assertEqual(resp.body, b'NEWER')
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_0_at_ts_0(resp)
else:
_assert_repl_data_at_ts_2()
def _assert_frag_2_at_ts_2(resp):
self.assertIn(resp.status_int, (200, 202))
# do not expect meta file to be included since it is older
expect = {
'X-Timestamp': ts_2.normal,
'X-Backend-Timestamp': ts_2.internal,
'X-Backend-Data-Timestamp': ts_2.internal,
'X-Backend-Durable-Timestamp': ts_0.internal,
'X-Object-Sysmeta-Ec-Frag-Index': '2'}
self._assertDictContainsSubset(expect, resp.headers)
self.assertEqual(backend_frags, json.loads(
resp.headers['X-Backend-Fragments']))
self.assertNotIn('X-Object-Meta-Test', resp.headers)
# Request with preferences can select the newer non-durable frag
prefs = json.dumps(
[{'timestamp': ts_2.internal, 'exclude': [1, 3]}])
headers = {'X-Backend-Storage-Policy-Index': int(policy),
'X-Backend-Fragment-Preferences': prefs}
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_2_at_ts_2(resp)
self.assertECBodyEqual(resp, b'NEWER')
else:
_assert_repl_data_at_ts_2()
self.assertEqual(resp.body, b'NEWER')
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_2_at_ts_2(resp)
else:
_assert_repl_data_at_ts_2()
# Request with preference for ts_0 but excludes index 0 will
# default to newest frag
prefs = json.dumps(
[{'timestamp': ts_0.internal, 'exclude': [0]}])
headers = {'X-Backend-Storage-Policy-Index': int(policy),
'X-Backend-Fragment-Preferences': prefs}
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_2_at_ts_2(resp)
self.assertECBodyEqual(resp, b'NEWER')
else:
_assert_repl_data_at_ts_2()
self.assertEqual(resp.body, b'NEWER')
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_2_at_ts_2(resp)
else:
_assert_repl_data_at_ts_2()
# Request with preferences that exclude all frags get nothing
prefs = json.dumps(
[{'timestamp': ts_0.internal, 'exclude': [0]},
{'timestamp': ts_2.internal, 'exclude': [2]}])
headers = {'X-Backend-Storage-Policy-Index': int(policy),
'X-Backend-Fragment-Preferences': prefs}
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
self.assertEqual(resp.status_int, 404)
else:
_assert_repl_data_at_ts_2()
self.assertEqual(resp.body, b'NEWER')
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
self.assertEqual(resp.status_int, 404)
else:
_assert_repl_data_at_ts_2()
# Request with empty preferences will get non-durable
prefs = json.dumps([])
headers = {'X-Backend-Storage-Policy-Index': int(policy),
'X-Backend-Fragment-Preferences': prefs}
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_2_at_ts_2(resp)
self.assertECBodyEqual(resp, b'NEWER')
else:
_assert_repl_data_at_ts_2()
self.assertEqual(resp.body, b'NEWER')
req = Request.blank('/sda1/p/a/c/o', headers=headers,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
if policy.policy_type == EC_POLICY:
_assert_frag_2_at_ts_2(resp)
else:
_assert_repl_data_at_ts_2()
def test_GET_quarantine(self):
# Test swift.obj.server.ObjectController.GET
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = b'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
policy=POLICIES.legacy)
disk_file.open()
file_name = os.path.basename(disk_file._data_file)
etag = md5(usedforsecurity=False)
etag.update(b'VERIF')
etag = etag.hexdigest()
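        # Overwrite the on-disk metadata with an ETag computed from b'VERIF'
        # so it cannot match the stored b'VERIFY' body; reading the object
        # should detect the mismatch and quarantine the file.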
metadata = {'X-Timestamp': timestamp, 'name': '/a/c/o',
'Content-Length': 6, 'ETag': etag}
diskfile.write_metadata(disk_file._fp, metadata)
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
quar_dir = os.path.join(
self.testdir, 'sda1', 'quarantined', 'objects',
os.path.basename(os.path.dirname(disk_file._data_file)))
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
body = resp.body # actually does quarantining
self.assertEqual(body, b'VERIFY')
self.assertEqual(os.listdir(quar_dir)[0], file_name)
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
def test_GET_quarantine_zbyte(self):
# Test swift.obj.server.ObjectController.GET
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
        req.body = b'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
policy=POLICIES.legacy)
disk_file.open(timestamp)
file_name = os.path.basename(disk_file._data_file)
with open(disk_file._data_file) as fp:
metadata = diskfile.read_metadata(fp)
os.unlink(disk_file._data_file)
with open(disk_file._data_file, 'w') as fp:
diskfile.write_metadata(fp, metadata)
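        # The .data file is now zero bytes long but still carries metadata
        # claiming a 6 byte body, so the GET below quarantines it and 404s.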
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
quar_dir = os.path.join(
self.testdir, 'sda1', 'quarantined', 'objects',
os.path.basename(os.path.dirname(disk_file._data_file)))
self.assertEqual(os.listdir(quar_dir)[0], file_name)
def test_GET_quarantine_range(self):
# Test swift.obj.server.ObjectController.GET
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = b'VERIFY'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
disk_file = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
policy=POLICIES.legacy)
disk_file.open(timestamp)
file_name = os.path.basename(disk_file._data_file)
etag = md5(usedforsecurity=False)
etag.update(b'VERIF')
etag = etag.hexdigest()
metadata = {'X-Timestamp': timestamp, 'name': '/a/c/o',
'Content-Length': 6, 'ETag': etag}
diskfile.write_metadata(disk_file._fp, metadata)
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
req = Request.blank('/sda1/p/a/c/o')
req.range = 'bytes=0-4' # partial
resp = req.get_response(self.object_controller)
quar_dir = os.path.join(
self.testdir, 'sda1', 'quarantined', 'objects',
os.path.basename(os.path.dirname(disk_file._data_file)))
        resp.body  # read the partial body; no quarantine is expected yet
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
self.assertFalse(os.path.isdir(quar_dir))
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
req = Request.blank('/sda1/p/a/c/o')
req.range = 'bytes=1-6' # partial
resp = req.get_response(self.object_controller)
quar_dir = os.path.join(
self.testdir, 'sda1', 'quarantined', 'objects',
os.path.basename(os.path.dirname(disk_file._data_file)))
        resp.body  # read the partial body; no quarantine is expected yet
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
self.assertFalse(os.path.isdir(quar_dir))
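        # Only a Range covering the entire body forces the whole object to
        # be read and its ETag checked, so this request is the one that
        # quarantines the corrupted file.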
req = Request.blank('/sda1/p/a/c/o')
req.range = 'bytes=0-14' # full
resp = req.get_response(self.object_controller)
quar_dir = os.path.join(
self.testdir, 'sda1', 'quarantined', 'objects',
os.path.basename(os.path.dirname(disk_file._data_file)))
self.assertEqual(os.listdir(disk_file._datadir)[0], file_name)
resp.body
self.assertTrue(os.path.isdir(quar_dir))
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
def test_GET_keep_cache_private_config_true(self):
# Test swift.obj.server.ObjectController.GET that, when
# 'keep_cache_private' is configured True, then
# disk_file.reader will be called with keep_cache=True.
# Set up a new ObjectController with customized configurations.
conf = {'devices': self.testdir, 'mount_check': 'false',
'container_update_timeout': 0.0,
'keep_cache_private': 'True'}
obj_controller = object_server.ObjectController(
conf, logger=self.logger)
obj_controller.bytes_per_sync = 1
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = b'VERIFY'
resp = req.get_response(obj_controller)
self.assertEqual(resp.status_int, 201)
# Request headers have neither 'X-Auth-Token' nor 'X-Storage-Token'.
req = Request.blank('/sda1/p/a/c/o',
headers={'Content-Type': 'application/x-test'})
reader_mock = mock.Mock(keep_cache=False)
with mock.patch('swift.obj.diskfile.BaseDiskFile.reader', reader_mock):
resp = req.get_response(obj_controller)
reader_mock.assert_called_with(keep_cache=True)
self.assertEqual(resp.status_int, 200)
etag = '"%s"' % md5(b'VERIFY', usedforsecurity=False).hexdigest()
self.assertEqual(dict(resp.headers), {
'Content-Type': 'application/x-test',
'Content-Length': '6',
'Etag': etag,
'X-Backend-Timestamp': timestamp,
'X-Timestamp': timestamp,
'X-Backend-Data-Timestamp': timestamp,
'X-Backend-Durable-Timestamp': timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))),
})
# Request headers have 'X-Auth-Token'.
req = Request.blank('/sda1/p/a/c/o',
headers={'Content-Type': 'application/x-test',
'X-Auth-Token': '2340lsdfhhjl02lxfjj'})
reader_mock = mock.Mock(keep_cache=False)
with mock.patch('swift.obj.diskfile.BaseDiskFile.reader', reader_mock):
resp = req.get_response(obj_controller)
reader_mock.assert_called_with(keep_cache=True)
self.assertEqual(resp.status_int, 200)
# Request headers have 'X-Storage-Token'.
req = Request.blank('/sda1/p/a/c/o',
headers={'Content-Type': 'application/x-test',
'X-Storage-Token': '2340lsdfhhjl02lxfjj'})
reader_mock = mock.Mock(keep_cache=False)
with mock.patch('swift.obj.diskfile.BaseDiskFile.reader', reader_mock):
resp = req.get_response(obj_controller)
reader_mock.assert_called_with(keep_cache=True)
self.assertEqual(resp.status_int, 200)
# Request headers have both 'X-Auth-Token' and 'X-Storage-Token'.
req = Request.blank('/sda1/p/a/c/o',
headers={'Content-Type': 'application/x-test',
'X-Auth-Token': '2340lsdfhhjl02lxfjj',
'X-Storage-Token': '2340lsdfhhjl02lxfjj'})
reader_mock = mock.Mock(keep_cache=False)
with mock.patch('swift.obj.diskfile.BaseDiskFile.reader', reader_mock):
resp = req.get_response(obj_controller)
reader_mock.assert_called_with(keep_cache=True)
self.assertEqual(resp.status_int, 200)
def test_GET_keep_cache_private_config_false(self):
        # Test swift.obj.server.ObjectController.GET that, when
        # 'keep_cache_private' is configured false, disk_file.reader is
        # called with keep_cache=True only for requests that carry neither
        # 'X-Auth-Token' nor 'X-Storage-Token'.
# Set up a new ObjectController with customized configurations.
conf = {'devices': self.testdir, 'mount_check': 'false',
'container_update_timeout': 0.0,
'keep_cache_private': 'false'}
obj_controller = object_server.ObjectController(
conf, logger=self.logger)
obj_controller.bytes_per_sync = 1
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = b'VERIFY'
resp = req.get_response(obj_controller)
self.assertEqual(resp.status_int, 201)
# Request headers have neither 'X-Auth-Token' nor 'X-Storage-Token'.
req = Request.blank('/sda1/p/a/c/o',
headers={'Content-Type': 'application/x-test'})
reader_mock = mock.Mock(keep_cache=False)
with mock.patch('swift.obj.diskfile.BaseDiskFile.reader', reader_mock):
resp = req.get_response(obj_controller)
reader_mock.assert_called_with(keep_cache=True)
self.assertEqual(resp.status_int, 200)
etag = '"%s"' % md5(b'VERIFY', usedforsecurity=False).hexdigest()
self.assertEqual(dict(resp.headers), {
'Content-Type': 'application/x-test',
'Content-Length': '6',
'Etag': etag,
'X-Backend-Timestamp': timestamp,
'X-Timestamp': timestamp,
'X-Backend-Data-Timestamp': timestamp,
'X-Backend-Durable-Timestamp': timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))),
})
# Request headers have 'X-Auth-Token'.
req = Request.blank('/sda1/p/a/c/o',
headers={'Content-Type': 'application/x-test',
'X-Auth-Token': '2340lsdfhhjl02lxfjj'})
reader_mock = mock.Mock(keep_cache=False)
with mock.patch('swift.obj.diskfile.BaseDiskFile.reader', reader_mock):
resp = req.get_response(obj_controller)
reader_mock.assert_called_with(keep_cache=False)
self.assertEqual(resp.status_int, 200)
# Request headers have 'X-Storage-Token'.
req = Request.blank('/sda1/p/a/c/o',
headers={'Content-Type': 'application/x-test',
'X-Storage-Token': '2340lsdfhhjl02lxfjj'})
reader_mock = mock.Mock(keep_cache=False)
with mock.patch('swift.obj.diskfile.BaseDiskFile.reader', reader_mock):
resp = req.get_response(obj_controller)
reader_mock.assert_called_with(keep_cache=False)
self.assertEqual(resp.status_int, 200)
# Request headers have both 'X-Auth-Token' and 'X-Storage-Token'.
req = Request.blank('/sda1/p/a/c/o',
headers={'Content-Type': 'application/x-test',
'X-Auth-Token': '2340lsdfhhjl02lxfjj',
'X-Storage-Token': '2340lsdfhhjl02lxfjj'})
reader_mock = mock.Mock(keep_cache=False)
with mock.patch('swift.obj.diskfile.BaseDiskFile.reader', reader_mock):
resp = req.get_response(obj_controller)
reader_mock.assert_called_with(keep_cache=False)
self.assertEqual(resp.status_int, 200)
def test_GET_keep_cache_slo_manifest_no_config(self):
# Test swift.obj.server.ObjectController.GET that, when
# 'keep_cache_slo_manifest' is not configured and object
# metadata has "X-Static-Large-Object", then disk_file.reader
# will be called with keep_cache=False.
# Set up a new ObjectController with customized configurations.
conf = {'devices': self.testdir, 'mount_check': 'false',
'container_update_timeout': 0.0,
'keep_cache_private': 'false'}
obj_controller = object_server.ObjectController(
conf, logger=self.logger)
obj_controller.bytes_per_sync = 1
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test',
'X-Static-Large-Object': 'True'})
req.body = b'VERIFY'
resp = req.get_response(obj_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o',
headers={'Content-Type': 'application/x-test',
'X-Auth-Token': '2340lsdfhhjl02lxfjj'})
reader_mock = mock.Mock(keep_cache=False)
with mock.patch('swift.obj.diskfile.BaseDiskFile.reader', reader_mock):
resp = req.get_response(obj_controller)
reader_mock.assert_called_with(keep_cache=False)
self.assertEqual(resp.status_int, 200)
etag = '"%s"' % md5(b'VERIFY', usedforsecurity=False).hexdigest()
self.assertEqual(dict(resp.headers), {
'Content-Type': 'application/x-test',
'Content-Length': '6',
'Etag': etag,
'X-Static-Large-Object': 'True',
'X-Backend-Timestamp': timestamp,
'X-Timestamp': timestamp,
'X-Backend-Data-Timestamp': timestamp,
'X-Backend-Durable-Timestamp': timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))),
})
def test_GET_keep_cache_slo_manifest_config_false(self):
# Test swift.obj.server.ObjectController.GET that, when
# 'keep_cache_slo_manifest' is configured False and object
# metadata has "X-Static-Large-Object", then disk_file.reader
# will be called with keep_cache=False.
# Set up a new ObjectController with customized configurations.
conf = {'devices': self.testdir, 'mount_check': 'false',
'container_update_timeout': 0.0,
'keep_cache_private': 'false',
'keep_cache_slo_manifest': 'false'}
obj_controller = object_server.ObjectController(
conf, logger=self.logger)
obj_controller.bytes_per_sync = 1
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test',
'X-Static-Large-Object': 'True'})
req.body = b'VERIFY'
resp = req.get_response(obj_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o',
headers={'Content-Type': 'application/x-test',
'X-Auth-Token': '2340lsdfhhjl02lxfjj'})
reader_mock = mock.Mock(keep_cache=False)
with mock.patch('swift.obj.diskfile.BaseDiskFile.reader', reader_mock):
resp = req.get_response(obj_controller)
reader_mock.assert_called_with(keep_cache=False)
self.assertEqual(resp.status_int, 200)
etag = '"%s"' % md5(b'VERIFY', usedforsecurity=False).hexdigest()
self.assertEqual(dict(resp.headers), {
'Content-Type': 'application/x-test',
'Content-Length': '6',
'Etag': etag,
'X-Static-Large-Object': 'True',
'X-Backend-Timestamp': timestamp,
'X-Timestamp': timestamp,
'X-Backend-Data-Timestamp': timestamp,
'X-Backend-Durable-Timestamp': timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))),
})
def test_GET_keep_cache_slo_manifest_config_true(self):
# Test swift.obj.server.ObjectController.GET that, when
# 'keep_cache_slo_manifest' is configured true and object
# metadata has "X-Static-Large-Object", then disk_file.reader
# will be called with keep_cache=True.
# Set up a new ObjectController with customized configurations.
conf = {'devices': self.testdir, 'mount_check': 'false',
'container_update_timeout': 0.0,
'keep_cache_private': 'false',
'keep_cache_slo_manifest': 'true'}
obj_controller = object_server.ObjectController(
conf, logger=self.logger)
obj_controller.bytes_per_sync = 1
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test',
'X-Static-Large-Object': 'True'})
req.body = b'VERIFY'
resp = req.get_response(obj_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o',
headers={'Content-Type': 'application/x-test',
'X-Auth-Token': '2340lsdfhhjl02lxfjj'})
reader_mock = mock.Mock(keep_cache=False)
with mock.patch('swift.obj.diskfile.BaseDiskFile.reader', reader_mock):
resp = req.get_response(obj_controller)
reader_mock.assert_called_with(keep_cache=True)
self.assertEqual(resp.status_int, 200)
etag = '"%s"' % md5(b'VERIFY', usedforsecurity=False).hexdigest()
self.assertEqual(dict(resp.headers), {
'Content-Type': 'application/x-test',
'Content-Length': '6',
'Etag': etag,
'X-Static-Large-Object': 'True',
'X-Backend-Timestamp': timestamp,
'X-Timestamp': timestamp,
'X-Backend-Data-Timestamp': timestamp,
'X-Backend-Durable-Timestamp': timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))),
})
def test_GET_keep_cache_slo_manifest_not_slo(self):
# Test swift.obj.server.ObjectController.GET that, when
# 'keep_cache_slo_manifest' is configured true and object
# metadata has NO "X-Static-Large-Object", then disk_file.reader
# will be called with keep_cache=False.
# Set up a new ObjectController with customized configurations.
conf = {'devices': self.testdir, 'mount_check': 'false',
'container_update_timeout': 0.0,
'keep_cache_private': 'false',
'keep_cache_slo_manifest': 'true'}
obj_controller = object_server.ObjectController(
conf, logger=self.logger)
obj_controller.bytes_per_sync = 1
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test'})
req.body = b'VERIFY'
resp = req.get_response(obj_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o',
headers={'Content-Type': 'application/x-test',
'X-Auth-Token': '2340lsdfhhjl02lxfjj'})
reader_mock = mock.Mock(keep_cache=False)
with mock.patch('swift.obj.diskfile.BaseDiskFile.reader', reader_mock):
resp = req.get_response(obj_controller)
reader_mock.assert_called_with(keep_cache=False)
self.assertEqual(resp.status_int, 200)
etag = '"%s"' % md5(b'VERIFY', usedforsecurity=False).hexdigest()
self.assertEqual(dict(resp.headers), {
'Content-Type': 'application/x-test',
'Content-Length': '6',
'Etag': etag,
'X-Backend-Timestamp': timestamp,
'X-Timestamp': timestamp,
'X-Backend-Data-Timestamp': timestamp,
'X-Backend-Durable-Timestamp': timestamp,
'Last-Modified': strftime(
'%a, %d %b %Y %H:%M:%S GMT',
gmtime(math.ceil(float(timestamp)))),
})
@mock.patch("time.time", mock_time)
def test_DELETE(self):
# Test swift.obj.server.ObjectController.DELETE
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
# The following should have created a tombstone file
timestamp = normalize_timestamp(1000)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
ts_1000_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertTrue(os.path.isfile(ts_1000_file))
# There should now be a 1000 ts file.
self.assertEqual(len(os.listdir(os.path.dirname(ts_1000_file))), 1)
# The following should *not* have created a tombstone file.
timestamp = normalize_timestamp(999)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
ts_999_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertFalse(os.path.isfile(ts_999_file))
self.assertTrue(os.path.isfile(ts_1000_file))
self.assertEqual(len(os.listdir(os.path.dirname(ts_1000_file))), 1)
orig_timestamp = utils.Timestamp(1002).internal
headers = {'X-Timestamp': orig_timestamp,
'Content-Type': 'application/octet-stream',
'Content-Length': '4'}
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
        req.body = b'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
        # The PUT should have cleaned up the 1000 tombstone, leaving only
        # a 1002 data file.
data_1002_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
orig_timestamp + '.data')
self.assertTrue(os.path.isfile(data_1002_file))
self.assertEqual(len(os.listdir(os.path.dirname(data_1002_file))), 1)
# The following should *not* have created a tombstone file.
timestamp = normalize_timestamp(1001)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp)
ts_1001_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertFalse(os.path.isfile(ts_1001_file))
self.assertTrue(os.path.isfile(data_1002_file))
self.assertEqual(len(os.listdir(os.path.dirname(ts_1001_file))), 1)
timestamp = normalize_timestamp(1003)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
ts_1003_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertTrue(os.path.isfile(ts_1003_file))
self.assertEqual(len(os.listdir(os.path.dirname(ts_1003_file))), 1)
def test_DELETE_bad_timestamp(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': 'bad'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_DELETE_succeeds_with_later_POST(self):
t_put = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': t_put,
'Content-Length': 0,
                                     'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
t_delete = next(self.ts).internal
t_post = next(self.ts).internal
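        # t_delete is older than t_post: the DELETE still succeeds, but the
        # newer .meta written by the POST survives alongside the tombstone.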
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': t_post})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': t_delete},
)
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
obj_dir = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')))
ts_file = os.path.join(obj_dir, t_delete + '.ts')
self.assertTrue(os.path.isfile(ts_file))
meta_file = os.path.join(obj_dir, t_post + '.meta')
self.assertTrue(os.path.isfile(meta_file))
def test_DELETE_container_updates(self):
# Test swift.obj.server.ObjectController.DELETE and container
# updates, making sure container update is called in the correct
# state.
start = time()
orig_timestamp = utils.Timestamp(start)
headers = {'X-Timestamp': orig_timestamp.internal,
'Content-Type': 'application/octet-stream',
'Content-Length': '4'}
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
        req.body = b'test'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
calls_made = [0]
def our_container_update(*args, **kwargs):
calls_made[0] += 1
orig_cu = self.object_controller.container_update
self.object_controller.container_update = our_container_update
try:
# The following request should return 409 (HTTP Conflict). A
# tombstone file should not have been created with this timestamp.
timestamp = utils.Timestamp(start - 0.00001)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp.internal})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(resp.headers['x-backend-timestamp'],
orig_timestamp.internal)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertFalse(os.path.isfile(objfile))
self.assertEqual(len(os.listdir(os.path.dirname(objfile))), 1)
self.assertEqual(0, calls_made[0])
# The following request should return 204, and the object should
# be truly deleted (container update is performed) because this
# timestamp is newer. A tombstone file should have been created
# with this timestamp.
timestamp = utils.Timestamp(start + 0.00001)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp.internal})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(1, calls_made[0])
self.assertEqual(len(os.listdir(os.path.dirname(objfile))), 1)
# The following request should return a 404, as the object should
# already have been deleted, but it should have also performed a
# container update because the timestamp is newer, and a tombstone
# file should also exist with this timestamp.
timestamp = utils.Timestamp(start + 0.00002)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp.internal})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertTrue(os.path.isfile(objfile))
self.assertEqual(2, calls_made[0])
self.assertEqual(len(os.listdir(os.path.dirname(objfile))), 1)
# The following request should return a 404, as the object should
# already have been deleted, and it should not have performed a
# container update because the timestamp is older, or created a
# tombstone file with this timestamp.
timestamp = utils.Timestamp(start + 0.00001)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp.internal})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(timestamp).internal + '.ts')
self.assertFalse(os.path.isfile(objfile))
self.assertEqual(2, calls_made[0])
self.assertEqual(len(os.listdir(os.path.dirname(objfile))), 1)
finally:
self.object_controller.container_update = orig_cu
def test_DELETE_full_drive(self):
def mock_diskfile_delete(self, timestamp):
raise DiskFileNoSpace()
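        # DiskFileNoSpace raised by the diskfile layer is reported to the
        # client as 507 Insufficient Storage.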
t_put = utils.Timestamp.now()
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': t_put.internal,
'Content-Length': 0,
                                     'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
with mock.patch('swift.obj.diskfile.BaseDiskFile.delete',
mock_diskfile_delete):
t_delete = utils.Timestamp.now()
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': t_delete.internal})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 507)
def test_object_update_with_offset(self):
container_updates = []
def capture_updates(ip, port, method, path, headers, *args, **kwargs):
container_updates.append((ip, port, method, path, headers))
# create a new object
create_timestamp = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o', method='PUT', body=b'test1',
headers={'X-Timestamp': create_timestamp,
'X-Container-Host': '10.0.0.1:8080',
'X-Container-Device': 'sda1',
'X-Container-Partition': 'p',
'Content-Type': 'text/plain'})
with mocked_http_conn(200, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 201)
self.assertEqual(1, len(container_updates))
for update in container_updates:
ip, port, method, path, headers = update
self.assertEqual(ip, '10.0.0.1')
self.assertEqual(port, '8080')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/sda1/p/a/c/o')
expected = {
'X-Size': len(b'test1'),
'X-Etag': md5(b'test1', usedforsecurity=False).hexdigest(),
'X-Content-Type': 'text/plain',
'X-Timestamp': create_timestamp,
}
for key, value in expected.items():
self.assertEqual(headers[key], str(value))
container_updates = [] # reset
# read back object
req = Request.blank('/sda1/p/a/c/o', method='GET')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Timestamp'],
utils.Timestamp(create_timestamp).normal)
self.assertEqual(resp.headers['X-Backend-Timestamp'],
create_timestamp)
self.assertEqual(resp.body, b'test1')
# send an update with an offset
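        # An offset extends a Timestamp's internal form (e.g. <ts>_<hex
        # offset>) so it sorts after the base timestamp without changing
        # the client-visible (normal) timestamp value.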
offset_timestamp = utils.Timestamp(
create_timestamp, offset=1).internal
req = Request.blank('/sda1/p/a/c/o', method='PUT', body=b'test2',
headers={'X-Timestamp': offset_timestamp,
'X-Container-Host': '10.0.0.1:8080',
'X-Container-Device': 'sda1',
'X-Container-Partition': 'p',
'Content-Type': 'text/html'})
with mocked_http_conn(200, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 201)
self.assertEqual(1, len(container_updates))
for update in container_updates:
ip, port, method, path, headers = update
self.assertEqual(ip, '10.0.0.1')
self.assertEqual(port, '8080')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/sda1/p/a/c/o')
expected = {
'X-Size': len(b'test2'),
'X-Etag': md5(b'test2', usedforsecurity=False).hexdigest(),
'X-Content-Type': 'text/html',
'X-Timestamp': offset_timestamp,
}
for key, value in expected.items():
self.assertEqual(headers[key], str(value))
container_updates = [] # reset
# read back new offset
req = Request.blank('/sda1/p/a/c/o', method='GET')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Timestamp'],
utils.Timestamp(offset_timestamp).normal)
self.assertEqual(resp.headers['X-Backend-Timestamp'],
offset_timestamp)
self.assertEqual(resp.body, b'test2')
# now overwrite with a newer time
overwrite_timestamp = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o', method='PUT', body=b'test3',
headers={'X-Timestamp': overwrite_timestamp,
'X-Container-Host': '10.0.0.1:8080',
'X-Container-Device': 'sda1',
'X-Container-Partition': 'p',
'Content-Type': 'text/enriched'})
with mocked_http_conn(200, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 201)
self.assertEqual(1, len(container_updates))
for update in container_updates:
ip, port, method, path, headers = update
self.assertEqual(ip, '10.0.0.1')
self.assertEqual(port, '8080')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/sda1/p/a/c/o')
expected = {
'X-Size': len(b'test3'),
'X-Etag': md5(b'test3', usedforsecurity=False).hexdigest(),
'X-Content-Type': 'text/enriched',
'X-Timestamp': overwrite_timestamp,
}
for key, value in expected.items():
self.assertEqual(headers[key], str(value))
container_updates = [] # reset
# read back overwrite
req = Request.blank('/sda1/p/a/c/o', method='GET')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Timestamp'],
utils.Timestamp(overwrite_timestamp).normal)
self.assertEqual(resp.headers['X-Backend-Timestamp'],
overwrite_timestamp)
self.assertEqual(resp.body, b'test3')
# delete with an offset
offset_delete = utils.Timestamp(overwrite_timestamp,
offset=1).internal
req = Request.blank('/sda1/p/a/c/o', method='DELETE',
headers={'X-Timestamp': offset_delete,
'X-Container-Host': '10.0.0.1:8080',
'X-Container-Device': 'sda1',
'X-Container-Partition': 'p'})
with mocked_http_conn(200, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 204)
self.assertEqual(1, len(container_updates))
for update in container_updates:
ip, port, method, path, headers = update
self.assertEqual(ip, '10.0.0.1')
self.assertEqual(port, '8080')
self.assertEqual(method, 'DELETE')
self.assertEqual(path, '/sda1/p/a/c/o')
expected = {
'X-Timestamp': offset_delete,
}
for key, value in expected.items():
self.assertEqual(headers[key], str(value))
container_updates = [] # reset
# read back offset delete
req = Request.blank('/sda1/p/a/c/o', method='GET')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertIsNone(resp.headers['X-Timestamp'])
self.assertEqual(resp.headers['X-Backend-Timestamp'], offset_delete)
# and one more delete with a newer timestamp
delete_timestamp = next(self.ts).internal
req = Request.blank('/sda1/p/a/c/o', method='DELETE',
headers={'X-Timestamp': delete_timestamp,
'X-Container-Host': '10.0.0.1:8080',
'X-Container-Device': 'sda1',
'X-Container-Partition': 'p'})
with mocked_http_conn(200, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 404)
self.assertEqual(1, len(container_updates))
for update in container_updates:
ip, port, method, path, headers = update
self.assertEqual(ip, '10.0.0.1')
self.assertEqual(port, '8080')
self.assertEqual(method, 'DELETE')
self.assertEqual(path, '/sda1/p/a/c/o')
expected = {
'X-Timestamp': delete_timestamp,
}
for key, value in expected.items():
self.assertEqual(headers[key], str(value))
container_updates = [] # reset
# read back delete
req = Request.blank('/sda1/p/a/c/o', method='GET')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertIsNone(resp.headers['X-Timestamp'])
self.assertEqual(resp.headers['X-Backend-Timestamp'], delete_timestamp)
def test_call_bad_request(self):
# Test swift.obj.server.ObjectController.__call__
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
"""Sends args to outbuf"""
outbuf.write(args[0])
self.object_controller.__call__({'REQUEST_METHOD': 'PUT',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '400 ')
def test_call_not_found(self):
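        # __call__ GET for an object that was never PUT should return 404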
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
"""Sends args to outbuf"""
outbuf.write(args[0])
self.object_controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '404 ')
def test_call_bad_method(self):
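        # __call__ with an unsupported request method should return 405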
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
"""Sends args to outbuf"""
outbuf.write(args[0])
self.object_controller.__call__({'REQUEST_METHOD': 'INVALID',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test_call_name_collision(self):
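        # two different object paths that hash to the same on-disk location
        # must not overwrite each other; the second PUT is rejected with 403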
def my_check(*args):
return False
def my_hash_path(*args):
return md5(b'collide', usedforsecurity=False).hexdigest()
with mock.patch("swift.obj.diskfile.hash_path", my_hash_path):
with mock.patch("swift.obj.server.check_object_creation",
my_check):
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
"""Sends args to outbuf"""
outbuf.write(args[0])
self.object_controller.__call__({
'REQUEST_METHOD': 'PUT',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'CONTENT_TYPE': 'text/html',
'HTTP_X_TIMESTAMP': normalize_timestamp(1.2),
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '201 ')
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
"""Sends args to outbuf"""
outbuf.write(args[0])
self.object_controller.__call__({
'REQUEST_METHOD': 'PUT',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/b/d/x',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'CONTENT_TYPE': 'text/html',
'HTTP_X_TIMESTAMP': normalize_timestamp(1.3),
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '403 ')
def test_invalid_method_doesnt_exist(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.write(args[0])
self.object_controller.__call__({
'REQUEST_METHOD': 'method_doesnt_exist',
'PATH_INFO': '/sda1/p/a/c/o'},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test_invalid_method_is_not_public(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.write(args[0])
self.object_controller.__call__({'REQUEST_METHOD': '__init__',
'PATH_INFO': '/sda1/p/a/c/o'},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test_chunked_put(self):
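        # a chunked-transfer PUT should be stored and read back intact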
listener = listen_zero()
port = listener.getsockname()[1]
killer = spawn(wsgi.server, listener, self.object_controller,
NullLogger())
sock = connect_tcp(('localhost', port))
fd = sock.makefile('rwb')
s = 'PUT /sda1/p/a/c/o HTTP/1.1\r\nHost: localhost\r\n' \
'Content-Type: text/plain\r\n' \
'Connection: close\r\nX-Timestamp: %s\r\n' \
'Transfer-Encoding: chunked\r\n\r\n' \
'2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n'
s = s % normalize_timestamp(1.0)
fd.write(s.encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', port))
fd = sock.makefile('rwb')
fd.write(b'GET /sda1/p/a/c/o HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
response = fd.read()
self.assertEqual(response, b'oh hai')
killer.kill()
def test_chunked_content_length_mismatch_zero(self):
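        # with both Content-Length: 0 and Transfer-Encoding: chunked, the
        # chunked body is what gets stored and read back on GET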
listener = listen_zero()
port = listener.getsockname()[1]
killer = spawn(wsgi.server, listener, self.object_controller,
NullLogger())
sock = connect_tcp(('localhost', port))
fd = sock.makefile('rwb')
s = 'PUT /sda1/p/a/c/o HTTP/1.1\r\nHost: localhost\r\n' \
'Content-Type: text/plain\r\n' \
'Connection: close\r\nX-Timestamp: %s\r\n' \
'Content-Length: 0\r\n' \
'Transfer-Encoding: chunked\r\n\r\n' \
'2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n'
s = s % normalize_timestamp(1.0)
fd.write(s.encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', port))
fd = sock.makefile('rwb')
s = 'GET /sda1/p/a/c/o HTTP/1.1\r\n' \
'Host: localhost\r\n' \
'X-Timestamp: %s\r\n' \
'Connection: close\r\n\r\n' % normalize_timestamp(2.0)
fd.write(s.encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
response = fd.read()
self.assertEqual(response, b'oh hai')
killer.kill()
def test_max_object_name_length(self):
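        # names up to MAX_OBJECT_NAME_LENGTH are accepted; longer names 400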
timestamp = normalize_timestamp(time())
max_name_len = constraints.MAX_OBJECT_NAME_LENGTH
req = Request.blank(
'/sda1/p/a/c/' + ('1' * max_name_len),
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'DATA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/' + ('2' * (max_name_len + 1)),
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'DATA'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_max_upload_time(self):
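        # a body that trickles in slower than max_upload_time gets a 408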
class SlowBody(object):
def __init__(self):
self.sent = 0
def read(self, size=-1):
if self.sent < 4:
sleep(0.1)
self.sent += 1
return b' '
return b''
def set_hundred_continue_response_headers(*a, **kw):
pass
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'wsgi.input': SlowBody()},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4', 'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.object_controller.max_upload_time = 0.1
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'wsgi.input': SlowBody()},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4', 'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 408)
def test_short_body(self):
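        # a body shorter than the declared Content-Length results in a 499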
class ShortBody(object):
def __init__(self):
self.sent = False
def read(self, size=-1):
if not self.sent:
self.sent = True
return b' '
return b''
def set_hundred_continue_response_headers(*a, **kw):
pass
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'wsgi.input': ShortBody()},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4', 'Content-Type': 'text/plain'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 499)
def test_bad_sinces(self):
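        # malformed or out-of-range conditional date headers are ignored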
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4', 'Content-Type': 'text/plain'},
body=b' ')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Unmodified-Since': 'Not a valid date'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': 'Not a valid date'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
too_big_date_list = list(datetime.datetime.max.timetuple())
too_big_date_list[0] += 1 # bump up the year
too_big_date = strftime(
"%a, %d %b %Y %H:%M:%S UTC", struct_time(too_big_date_list))
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Unmodified-Since': too_big_date})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
def test_content_encoding(self):
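        # Content-Encoding metadata is persisted and returned on GET and HEAD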
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4', 'Content-Type': 'text/plain',
'Content-Encoding': 'gzip'},
body=b' ')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-encoding'], 'gzip')
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-encoding'], 'gzip')
def test_async_update_http_connect(self):
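        # async_update should pass the node, partition, method, path and
        # headers straight through to http_connect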
policy = random.choice(list(POLICIES))
self._stage_tmp_dir(policy)
given_args = []
def fake_http_connect(*args):
given_args.extend(args)
raise Exception('test')
orig_http_connect = object_server.http_connect
try:
object_server.http_connect = fake_http_connect
self.object_controller.async_update(
'PUT', 'a', 'c', 'o', '127.0.0.1:1234', 1, 'sdc1',
{'x-timestamp': '1', 'x-out': 'set',
'X-Backend-Storage-Policy-Index': int(policy)}, 'sda1',
policy)
finally:
object_server.http_connect = orig_http_connect
self.assertEqual(
given_args,
['127.0.0.1', '1234', 'sdc1', 1, 'PUT', '/a/c/o', {
'x-timestamp': '1', 'x-out': 'set',
'user-agent': 'object-server %s' % os.getpid(),
'X-Backend-Storage-Policy-Index': int(policy)}])
@patch_policies([StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one'),
StoragePolicy(37, 'fantastico')])
def test_updating_multiple_delete_at_container_servers(self):
        # rebuild the diskfile router so it picks up the patched policies
self.object_controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.object_controller.logger)
policy = random.choice(list(POLICIES))
self.object_controller.expiring_objects_account = 'exp'
self.object_controller.expiring_objects_container_divisor = 60
http_connect_args = []
def fake_http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
class SuccessfulFakeConn(object):
@property
def status(self):
return 200
def getresponse(self):
return self
def read(self):
return b''
captured_args = {'ipaddr': ipaddr, 'port': port,
'device': device, 'partition': partition,
'method': method, 'path': path, 'ssl': ssl,
'headers': headers, 'query_string': query_string}
http_connect_args.append(
dict((k, v) for k, v in captured_args.items()
if v is not None))
return SuccessfulFakeConn()
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '12345',
'Content-Type': 'application/burrito',
'Content-Length': '0',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Container-Partition': '20',
'X-Container-Host': '1.2.3.4:5',
'X-Container-Device': 'sdb1',
'X-Delete-At': 9999999999,
'X-Delete-At-Container': '9999999960',
'X-Delete-At-Host': "10.1.1.1:6201,10.2.2.2:6202",
'X-Delete-At-Partition': '6237',
'X-Delete-At-Device': 'sdp,sdq'})
with mock.patch.object(
object_server, 'http_connect', fake_http_connect):
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
http_connect_args.sort(key=operator.itemgetter('ipaddr'))
self.assertEqual(len(http_connect_args), 3)
self.assertEqual(
http_connect_args[0],
{'ipaddr': '1.2.3.4',
'port': '5',
'path': '/a/c/o',
'device': 'sdb1',
'partition': '20',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-content-type': 'application/burrito',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-size': '0',
'x-timestamp': utils.Timestamp('12345').internal,
'referer': 'PUT http://localhost/sda1/p/a/c/o',
'user-agent': 'object-server %d' % os.getpid(),
'X-Backend-Storage-Policy-Index': int(policy),
'x-trans-id': '-'})})
self.assertEqual(
http_connect_args[1],
{'ipaddr': '10.1.1.1',
'port': '6201',
'path': '/exp/9999999960/9999999999-a/c/o',
'device': 'sdp',
'partition': '6237',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-size': '0',
'x-timestamp': utils.Timestamp('12345').internal,
'referer': 'PUT http://localhost/sda1/p/a/c/o',
'user-agent': 'object-server %d' % os.getpid(),
# system account storage policy is 0
'X-Backend-Storage-Policy-Index': 0,
'x-trans-id': '-'})})
self.assertEqual(
http_connect_args[2],
{'ipaddr': '10.2.2.2',
'port': '6202',
'path': '/exp/9999999960/9999999999-a/c/o',
'device': 'sdq',
'partition': '6237',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-size': '0',
'x-timestamp': utils.Timestamp('12345').internal,
'referer': 'PUT http://localhost/sda1/p/a/c/o',
'user-agent': 'object-server %d' % os.getpid(),
# system account storage policy is 0
'X-Backend-Storage-Policy-Index': 0,
'x-trans-id': '-'})})
@patch_policies([StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one'),
StoragePolicy(26, 'twice-thirteen')])
def test_updating_multiple_container_servers(self):
        # rebuild the diskfile router so it picks up the patched policies
self.object_controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.object_controller.logger)
http_connect_args = []
def fake_http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
class SuccessfulFakeConn(object):
@property
def status(self):
return 200
def getresponse(self):
return self
def read(self):
return b''
captured_args = {'ipaddr': ipaddr, 'port': port,
'device': device, 'partition': partition,
'method': method, 'path': path, 'ssl': ssl,
'headers': headers, 'query_string': query_string}
http_connect_args.append(
dict((k, v) for k, v in captured_args.items()
if v is not None))
return SuccessfulFakeConn()
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '12345',
'Content-Type': 'application/burrito',
'Content-Length': '0',
'X-Backend-Storage-Policy-Index': '26',
'X-Container-Partition': '20',
'X-Container-Host': '1.2.3.4:5, 6.7.8.9:10',
'X-Container-Device': 'sdb1, sdf1'})
with mock.patch.object(
object_server, 'http_connect', fake_http_connect):
with fake_spawn():
req.get_response(self.object_controller)
http_connect_args.sort(key=operator.itemgetter('ipaddr'))
self.assertEqual(len(http_connect_args), 2)
self.assertEqual(
http_connect_args[0],
{'ipaddr': '1.2.3.4',
'port': '5',
'path': '/a/c/o',
'device': 'sdb1',
'partition': '20',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-content-type': 'application/burrito',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-size': '0',
'x-timestamp': utils.Timestamp('12345').internal,
'X-Backend-Storage-Policy-Index': '26',
'referer': 'PUT http://localhost/sda1/p/a/c/o',
'user-agent': 'object-server %d' % os.getpid(),
'x-trans-id': '-'})})
self.assertEqual(
http_connect_args[1],
{'ipaddr': '6.7.8.9',
'port': '10',
'path': '/a/c/o',
'device': 'sdf1',
'partition': '20',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-content-type': 'application/burrito',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-size': '0',
'x-timestamp': utils.Timestamp('12345').internal,
'X-Backend-Storage-Policy-Index': '26',
'referer': 'PUT http://localhost/sda1/p/a/c/o',
'user-agent': 'object-server %d' % os.getpid(),
'x-trans-id': '-'})})
def test_object_delete_at_async_update(self):
policy = random.choice(list(POLICIES))
container_updates = []
def capture_updates(ip, port, method, path, headers, *args, **kwargs):
container_updates.append((ip, port, method, path, headers))
# put everything in the future; otherwise setting X-Delete-At may fail
self.ts = make_timestamp_iter(10)
put_timestamp = next(self.ts).internal
delete_at_timestamp = utils.normalize_delete_at_timestamp(
next(self.ts).normal)
delete_at_container = (
int(delete_at_timestamp) //
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
headers = {
'Content-Type': 'text/plain',
'X-Timestamp': put_timestamp,
'X-Container-Host': '10.0.0.1:6201',
'X-Container-Device': 'sda1',
'X-Container-Partition': 'p',
'X-Delete-At': delete_at_timestamp,
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': 'p',
'X-Delete-At-Host': '10.0.0.2:6202',
'X-Delete-At-Device': 'sda1',
'X-Backend-Storage-Policy-Index': int(policy)}
if policy.policy_type == EC_POLICY:
headers['X-Object-Sysmeta-Ec-Frag-Index'] = '2'
req = Request.blank(
'/sda1/p/a/c/o', method='PUT', body=b'', headers=headers)
with mocked_http_conn(
500, 500, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
self.assertEqual(201, resp.status_int, resp.body)
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 201)
self.assertEqual(2, len(container_updates))
delete_at_update, container_update = container_updates
# delete_at_update
ip, port, method, path, headers = delete_at_update
self.assertEqual(ip, '10.0.0.2')
self.assertEqual(port, '6202')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/sda1/p/.expiring_objects/%s/%s-a/c/o' %
(delete_at_container, delete_at_timestamp))
expected = {
'X-Timestamp': put_timestamp,
# system account storage policy is 0
'X-Backend-Storage-Policy-Index': 0,
}
for key, value in expected.items():
self.assertEqual(headers[key], str(value))
# container_update
ip, port, method, path, headers = container_update
self.assertEqual(ip, '10.0.0.1')
self.assertEqual(port, '6201')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/sda1/p/a/c/o')
expected = {
'X-Timestamp': put_timestamp,
'X-Backend-Storage-Policy-Index': int(policy),
}
for key, value in expected.items():
self.assertEqual(headers[key], str(value))
# check async pendings
async_dir = os.path.join(self.testdir, 'sda1',
diskfile.get_async_dir(policy))
found_files = []
for root, dirs, files in os.walk(async_dir):
for f in files:
async_file = os.path.join(root, f)
found_files.append(async_file)
data = pickle.load(open(async_file, 'rb'))
if data['account'] == 'a':
self.assertEqual(
int(data['headers']
['X-Backend-Storage-Policy-Index']), int(policy))
elif data['account'] == '.expiring_objects':
self.assertEqual(
int(data['headers']
['X-Backend-Storage-Policy-Index']), 0)
else:
self.fail('unexpected async pending data')
self.assertEqual(2, len(found_files))
def test_async_update_saves_on_exception(self):
policy = random.choice(list(POLICIES))
self._stage_tmp_dir(policy)
_prefix = utils.HASH_PATH_PREFIX
utils.HASH_PATH_PREFIX = b''
def fake_http_connect(*args):
raise Exception('test')
orig_http_connect = object_server.http_connect
try:
object_server.http_connect = fake_http_connect
self.object_controller.async_update(
'PUT', 'a', 'c', 'o', '127.0.0.1:1234', 1, 'sdc1',
{'x-timestamp': '1', 'x-out': 'set',
'X-Backend-Storage-Policy-Index': int(policy)}, 'sda1',
policy)
finally:
object_server.http_connect = orig_http_connect
utils.HASH_PATH_PREFIX = _prefix
async_dir = diskfile.get_async_dir(policy)
self.assertEqual(
pickle.load(open(os.path.join(
self.testdir, 'sda1', async_dir, 'a83',
'06fbf0b514e5199dfc4e00f42eb5ea83-%s' %
utils.Timestamp(1).internal), 'rb')),
{'headers': {'x-timestamp': '1', 'x-out': 'set',
'user-agent': 'object-server %s' % os.getpid(),
'X-Backend-Storage-Policy-Index': int(policy)},
'account': 'a', 'container': 'c', 'obj': 'o', 'op': 'PUT'})
def test_async_update_saves_on_non_2xx(self):
policy = random.choice(list(POLICIES))
self._stage_tmp_dir(policy)
_prefix = utils.HASH_PATH_PREFIX
utils.HASH_PATH_PREFIX = b''
def fake_http_connect(status):
class FakeConn(object):
def __init__(self, status):
self.status = status
def getresponse(self):
return self
def read(self):
return b''
return lambda *args: FakeConn(status)
orig_http_connect = object_server.http_connect
try:
for status in (199, 300, 503):
object_server.http_connect = fake_http_connect(status)
self.object_controller.async_update(
'PUT', 'a', 'c', 'o', '127.0.0.1:1234', 1, 'sdc1',
{'x-timestamp': '1', 'x-out': str(status),
'X-Backend-Storage-Policy-Index': int(policy)}, 'sda1',
policy)
async_dir = diskfile.get_async_dir(policy)
self.assertEqual(
pickle.load(open(os.path.join(
self.testdir, 'sda1', async_dir, 'a83',
'06fbf0b514e5199dfc4e00f42eb5ea83-%s' %
utils.Timestamp(1).internal), 'rb')),
{'headers': {'x-timestamp': '1', 'x-out': str(status),
'user-agent':
'object-server %s' % os.getpid(),
'X-Backend-Storage-Policy-Index':
int(policy)},
'account': 'a', 'container': 'c', 'obj': 'o',
'op': 'PUT'})
finally:
object_server.http_connect = orig_http_connect
utils.HASH_PATH_PREFIX = _prefix
def test_async_update_does_not_save_on_2xx(self):
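        # successful (2xx) container updates must not leave async pendings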
_prefix = utils.HASH_PATH_PREFIX
utils.HASH_PATH_PREFIX = b''
def fake_http_connect(status):
class FakeConn(object):
def __init__(self, status):
self.status = status
def getresponse(self):
return self
def read(self):
return b''
return lambda *args: FakeConn(status)
orig_http_connect = object_server.http_connect
try:
for status in (200, 299):
object_server.http_connect = fake_http_connect(status)
self.object_controller.async_update(
'PUT', 'a', 'c', 'o', '127.0.0.1:1234', 1, 'sdc1',
{'x-timestamp': '1', 'x-out': str(status)}, 'sda1', 0)
self.assertFalse(
os.path.exists(os.path.join(
self.testdir, 'sda1', 'async_pending', 'a83',
'06fbf0b514e5199dfc4e00f42eb5ea83-0000000001.00000')))
finally:
object_server.http_connect = orig_http_connect
utils.HASH_PATH_PREFIX = _prefix
def test_async_update_saves_on_timeout(self):
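        # a node timeout during the container update still saves the update
        # as an async pending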
policy = random.choice(list(POLICIES))
self._stage_tmp_dir(policy)
_prefix = utils.HASH_PATH_PREFIX
utils.HASH_PATH_PREFIX = b''
def fake_http_connect():
class FakeConn(object):
def getresponse(self):
return sleep(1)
return lambda *args: FakeConn()
orig_http_connect = object_server.http_connect
try:
for status in (200, 299):
object_server.http_connect = fake_http_connect()
self.object_controller.node_timeout = 0.001
self.object_controller.async_update(
'PUT', 'a', 'c', 'o', '127.0.0.1:1234', 1, 'sdc1',
{'x-timestamp': '1', 'x-out': str(status)}, 'sda1',
policy)
async_dir = diskfile.get_async_dir(policy)
self.assertTrue(
os.path.exists(os.path.join(
self.testdir, 'sda1', async_dir, 'a83',
'06fbf0b514e5199dfc4e00f42eb5ea83-%s' %
utils.Timestamp(1).internal)))
finally:
object_server.http_connect = orig_http_connect
utils.HASH_PATH_PREFIX = _prefix
def test_container_update_no_async_update(self):
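        # without X-Container-* headers there is no update to make, so
        # async_update must not be called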
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234',
'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.container_update(
'PUT', 'a', 'c', 'o', req, {
'x-size': '0', 'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-content-type': 'text/plain', 'x-timestamp': '1'},
'sda1', policy)
self.assertEqual(given_args, [])
def test_container_update_success(self):
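        # a PUT with container update headers sends exactly one container
        # update with the expected headers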
container_updates = []
def capture_updates(ip, port, method, path, headers, *args, **kwargs):
container_updates.append((ip, port, method, path, headers))
req = Request.blank(
'/sda1/0/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '123',
'X-Container-Host': 'chost:cport',
'X-Container-Partition': 'cpartition',
'X-Container-Device': 'cdevice',
'Content-Type': 'text/plain'}, body='')
with mocked_http_conn(200, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 201)
self.assertEqual(len(container_updates), 1)
ip, port, method, path, headers = container_updates[0]
self.assertEqual(ip, 'chost')
self.assertEqual(port, 'cport')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/cdevice/cpartition/a/c/o')
self.assertEqual(headers, HeaderKeyDict({
'user-agent': 'object-server %s' % os.getpid(),
'x-size': '0',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-content-type': 'text/plain',
'x-timestamp': utils.Timestamp(1).internal,
'X-Backend-Storage-Policy-Index': '0', # default when not given
'x-trans-id': '123',
'referer': 'PUT http://localhost/sda1/0/a/c/o'}))
def test_PUT_container_update_overrides(self):
def do_test(override_headers):
container_updates = []
def capture_updates(
ip, port, method, path, headers, *args, **kwargs):
container_updates.append((ip, port, method, path, headers))
ts_put = next(self.ts)
headers = {
'X-Timestamp': ts_put.internal,
'X-Trans-Id': '123',
'X-Container-Host': 'chost:cport',
'X-Container-Partition': 'cpartition',
'X-Container-Device': 'cdevice',
'Content-Type': 'text/plain',
}
headers.update(override_headers)
req = Request.blank('/sda1/0/a/c/o', method='PUT',
headers=headers, body='')
with mocked_http_conn(
200, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 201)
self.assertEqual(len(container_updates), 1)
ip, port, method, path, headers = container_updates[0]
self.assertEqual(ip, 'chost')
self.assertEqual(port, 'cport')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/cdevice/cpartition/a/c/o')
self.assertEqual(headers, HeaderKeyDict({
'user-agent': 'object-server %s' % os.getpid(),
'x-size': '0',
'x-etag': 'override_etag',
'x-content-type': 'override_val',
'x-timestamp': ts_put.internal,
'X-Backend-Storage-Policy-Index': '0', # default
'x-trans-id': '123',
'referer': 'PUT http://localhost/sda1/0/a/c/o',
'x-foo': 'bar'}))
# EC policy override headers
do_test({
'X-Backend-Container-Update-Override-Etag': 'override_etag',
'X-Backend-Container-Update-Override-Content-Type': 'override_val',
'X-Backend-Container-Update-Override-Foo': 'bar',
'X-Backend-Container-Ignored': 'ignored'})
# middleware override headers
do_test({
'X-Object-Sysmeta-Container-Update-Override-Etag': 'override_etag',
'X-Object-Sysmeta-Container-Update-Override-Content-Type':
'override_val',
'X-Object-Sysmeta-Container-Update-Override-Foo': 'bar',
'X-Object-Sysmeta-Ignored': 'ignored'})
# middleware override headers take precedence over EC policy headers
do_test({
'X-Object-Sysmeta-Container-Update-Override-Etag': 'override_etag',
'X-Object-Sysmeta-Container-Update-Override-Content-Type':
'override_val',
'X-Object-Sysmeta-Container-Update-Override-Foo': 'bar',
'X-Backend-Container-Update-Override-Etag': 'ignored',
'X-Backend-Container-Update-Override-Content-Type': 'ignored',
'X-Backend-Container-Update-Override-Foo': 'ignored'})
def test_PUT_container_update_to_old_style_shard(self):
# verify that alternate container update path is respected when
# included in request headers
def do_test(container_path, expected_path, expected_container_path):
policy = random.choice(list(POLICIES))
container_updates = []
def capture_updates(
ip, port, method, path, headers, *args, **kwargs):
container_updates.append((ip, port, method, path, headers))
pickle_async_update_args = []
def fake_pickle_async_update(*args):
pickle_async_update_args.append(args)
diskfile_mgr = self.object_controller._diskfile_router[policy]
diskfile_mgr.pickle_async_update = fake_pickle_async_update
ts_put = next(self.ts)
headers = {
'X-Timestamp': ts_put.internal,
'X-Trans-Id': '123',
'X-Container-Host': 'chost:cport',
'X-Container-Partition': 'cpartition',
'X-Container-Device': 'cdevice',
'Content-Type': 'text/plain',
'X-Object-Sysmeta-Ec-Frag-Index': 0,
'X-Backend-Storage-Policy-Index': int(policy),
}
if container_path is not None:
headers['X-Backend-Container-Path'] = container_path
req = Request.blank('/sda1/0/a/c/o', method='PUT',
headers=headers, body='')
with mocked_http_conn(
500, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 201)
self.assertEqual(len(container_updates), 1)
# verify expected path used in update request
ip, port, method, path, headers = container_updates[0]
self.assertEqual(ip, 'chost')
self.assertEqual(port, 'cport')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/cdevice/cpartition/%s/o' % expected_path)
            # verify that the pickled update *always* has the root container
self.assertEqual(1, len(pickle_async_update_args))
            (objdevice, account, container, obj, data, timestamp,
             update_policy) = pickle_async_update_args[0]
self.assertEqual(objdevice, 'sda1')
self.assertEqual(account, 'a') # NB user account
self.assertEqual(container, 'c') # NB root container
self.assertEqual(obj, 'o')
self.assertEqual(timestamp, ts_put.internal)
            self.assertEqual(update_policy, policy)
expected_data = {
'headers': HeaderKeyDict({
'X-Size': '0',
'User-Agent': 'object-server %s' % os.getpid(),
'X-Content-Type': 'text/plain',
'X-Timestamp': ts_put.internal,
'X-Trans-Id': '123',
'Referer': 'PUT http://localhost/sda1/0/a/c/o',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e'}),
'obj': 'o',
'account': 'a',
'container': 'c',
'op': 'PUT'}
if expected_container_path:
expected_data['container_path'] = expected_container_path
self.assertEqual(expected_data, data)
do_test('a_shard/c_shard', 'a_shard/c_shard', 'a_shard/c_shard')
do_test('', 'a/c', None)
do_test(None, 'a/c', None)
# TODO: should these cases trigger a 400 response rather than
# defaulting to root path?
do_test('garbage', 'a/c', None)
do_test('/', 'a/c', None)
do_test('/no-acct', 'a/c', None)
do_test('no-cont/', 'a/c', None)
do_test('too/many/parts', 'a/c', None)
do_test('/leading/slash', 'a/c', None)
def test_PUT_container_update_to_shard(self):
# verify that alternate container update path is respected when
# included in request headers
def do_test(container_path, expected_path, expected_container_path):
policy = random.choice(list(POLICIES))
container_updates = []
def capture_updates(
ip, port, method, path, headers, *args, **kwargs):
container_updates.append((ip, port, method, path, headers))
pickle_async_update_args = []
def fake_pickle_async_update(*args):
pickle_async_update_args.append(args)
diskfile_mgr = self.object_controller._diskfile_router[policy]
diskfile_mgr.pickle_async_update = fake_pickle_async_update
ts_put = next(self.ts)
headers = {
'X-Timestamp': ts_put.internal,
'X-Trans-Id': '123',
'X-Container-Host': 'chost:cport',
'X-Container-Partition': 'cpartition',
'X-Container-Device': 'cdevice',
'Content-Type': 'text/plain',
'X-Object-Sysmeta-Ec-Frag-Index': 0,
'X-Backend-Storage-Policy-Index': int(policy),
}
if container_path is not None:
headers['X-Backend-Quoted-Container-Path'] = container_path
req = Request.blank('/sda1/0/a/c/o', method='PUT',
headers=headers, body='')
with mocked_http_conn(
500, give_connect=capture_updates) as fake_conn:
with fake_spawn():
resp = req.get_response(self.object_controller)
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 201)
self.assertEqual(len(container_updates), 1)
# verify expected path used in update request
ip, port, method, path, headers = container_updates[0]
self.assertEqual(ip, 'chost')
self.assertEqual(port, 'cport')
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/cdevice/cpartition/%s/o' % expected_path)
            # verify that the pickled update *always* has the root container
self.assertEqual(1, len(pickle_async_update_args))
            (objdevice, account, container, obj, data, timestamp,
             update_policy) = pickle_async_update_args[0]
self.assertEqual(objdevice, 'sda1')
self.assertEqual(account, 'a') # NB user account
self.assertEqual(container, 'c') # NB root container
self.assertEqual(obj, 'o')
self.assertEqual(timestamp, ts_put.internal)
            self.assertEqual(update_policy, policy)
expected_data = {
'headers': HeaderKeyDict({
'X-Size': '0',
'User-Agent': 'object-server %s' % os.getpid(),
'X-Content-Type': 'text/plain',
'X-Timestamp': ts_put.internal,
'X-Trans-Id': '123',
'Referer': 'PUT http://localhost/sda1/0/a/c/o',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e'}),
'obj': 'o',
'account': 'a',
'container': 'c',
'op': 'PUT'}
if expected_container_path:
expected_data['container_path'] = expected_container_path
self.assertEqual(expected_data, data)
do_test('a_shard/c_shard', 'a_shard/c_shard', 'a_shard/c_shard')
do_test('', 'a/c', None)
do_test(None, 'a/c', None)
# TODO: should these cases trigger a 400 response rather than
# defaulting to root path?
do_test('garbage', 'a/c', None)
do_test('/', 'a/c', None)
do_test('/no-acct', 'a/c', None)
do_test('no-cont/', 'a/c', None)
do_test('too/many/parts', 'a/c', None)
do_test('/leading/slash', 'a/c', None)
def test_container_update_async(self):
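        # a failed (500) container update is saved as an async pending via
        # the diskfile manager's pickle_async_update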
policy = random.choice(list(POLICIES))
req = Request.blank(
'/sda1/0/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '123',
'X-Container-Host': 'chost:cport',
'X-Container-Partition': 'cpartition',
'X-Container-Device': 'cdevice',
'Content-Type': 'text/plain',
'X-Object-Sysmeta-Ec-Frag-Index': 0,
'X-Backend-Storage-Policy-Index': int(policy)}, body='')
given_args = []
def fake_pickle_async_update(*args):
given_args[:] = args
diskfile_mgr = self.object_controller._diskfile_router[policy]
diskfile_mgr.pickle_async_update = fake_pickle_async_update
with mocked_http_conn(500) as fake_conn, fake_spawn():
resp = req.get_response(self.object_controller)
# fake_spawn() above waits on greenthreads to finish;
# don't start making assertions until then
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 201)
self.assertEqual(len(given_args), 7)
        (objdevice, account, container, obj, data, timestamp,
         update_policy) = given_args
self.assertEqual(objdevice, 'sda1')
self.assertEqual(account, 'a')
self.assertEqual(container, 'c')
self.assertEqual(obj, 'o')
self.assertEqual(timestamp, utils.Timestamp(1).internal)
        self.assertEqual(update_policy, policy)
self.assertEqual(data, {
'headers': HeaderKeyDict({
'X-Size': '0',
'User-Agent': 'object-server %s' % os.getpid(),
'X-Content-Type': 'text/plain',
'X-Timestamp': utils.Timestamp(1).internal,
'X-Trans-Id': '123',
'Referer': 'PUT http://localhost/sda1/0/a/c/o',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e'}),
'obj': 'o',
'account': 'a',
'container': 'c',
'op': 'PUT'})
def test_container_update_as_greenthread(self):
greenthreads = []
saved_spawn_calls = []
called_async_update_args = []
def local_fake_spawn(func, *a, **kw):
saved_spawn_calls.append((func, a, kw))
return mock.MagicMock()
def local_fake_async_update(*a, **kw):
            # capture the args to show async_update would have been called
called_async_update_args.append([a, kw])
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '12345',
'Content-Type': 'application/burrito',
'Content-Length': '0',
'X-Backend-Storage-Policy-Index': 0,
'X-Container-Partition': '20',
'X-Container-Host': '1.2.3.4:5',
'X-Container-Device': 'sdb1'})
with mock.patch.object(object_server, 'spawn', local_fake_spawn), \
mock.patch.object(self.object_controller, 'async_update',
local_fake_async_update):
resp = req.get_response(self.object_controller)
# check the response is completed and successful
self.assertEqual(resp.status_int, 201)
# check that async_update hasn't been called
self.assertFalse(len(called_async_update_args))
# now do the work in greenthreads
for func, a, kw in saved_spawn_calls:
gt = spawn(func, *a, **kw)
greenthreads.append(gt)
# wait for the greenthreads to finish
for gt in greenthreads:
gt.wait()
# check that the calls to async_update have happened
headers_out = {'X-Size': '0',
'X-Content-Type': 'application/burrito',
'X-Timestamp': '0000012345.00000',
'X-Trans-Id': '-',
'Referer': 'PUT http://localhost/sda1/p/a/c/o',
'X-Backend-Storage-Policy-Index': '0',
'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e'}
expected = [('PUT', 'a', 'c', 'o', '1.2.3.4:5', '20', 'sdb1',
headers_out, 'sda1', POLICIES[0]),
{'logger_thread_locals': (None, None),
'container_path': None}]
self.assertEqual(called_async_update_args, [expected])
def test_container_update_as_greenthread_with_timeout(self):
        # give it one container to update (so only one greenthread is
        # spawned); fake the greenthread so it will raise a Timeout, and
        # check that the right message is logged and the method returns None
called_async_update_args = []
def local_fake_spawn(func, *a, **kw):
m = mock.MagicMock()
def wait_with_error():
raise Timeout()
m.wait = wait_with_error # because raise can't be in a lambda
return m
def local_fake_async_update(*a, **kw):
            # capture the args to show async_update would have been called
called_async_update_args.append([a, kw])
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '12345',
'Content-Type': 'application/burrito',
'Content-Length': '0',
'X-Backend-Storage-Policy-Index': 0,
'X-Container-Partition': '20',
'X-Container-Host': '1.2.3.4:5',
'X-Container-Device': 'sdb1'})
with mock.patch.object(object_server, 'spawn',
local_fake_spawn):
with mock.patch.object(self.object_controller,
'container_update_timeout',
1.414213562):
resp = req.get_response(self.object_controller)
# check the response is completed and successful
self.assertEqual(resp.status_int, 201)
# check that the timeout was logged
expected_logged_error = "Container update timeout (1.4142s) " \
"waiting for [('1.2.3.4:5', 'sdb1')]"
self.assertTrue(
expected_logged_error in
self.object_controller.logger.get_lines_for_level('debug'))
def test_container_update_bad_args(self):
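        # mismatched X-Container-Host and X-Container-Device lists should
        # log an error and skip the container update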
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '123',
'X-Container-Host': 'chost,badhost',
'X-Container-Partition': 'cpartition',
'X-Container-Device': 'cdevice',
'X-Backend-Storage-Policy-Index': int(policy)})
with mock.patch.object(self.object_controller, 'async_update',
fake_async_update):
self.object_controller.container_update(
'PUT', 'a', 'c', 'o', req, {
'x-size': '0',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-content-type': 'text/plain', 'x-timestamp': '1'},
'sda1', policy)
self.assertEqual(given_args, [])
errors = self.object_controller.logger.get_lines_for_level('error')
self.assertEqual(len(errors), 1)
msg = errors[0]
self.assertTrue('Container update failed' in msg)
self.assertTrue('different numbers of hosts and devices' in msg)
self.assertTrue('chost,badhost' in msg)
self.assertTrue('cdevice' in msg)
def test_delete_at_update_cleans_old_entries(self):
        # When a request overwrites an object that carries delete-at
        # metadata, delete_at_update should queue a DELETE for the old
        # expirer entry unless the request opts out of queue cleaning.
policy = random.choice(list(POLICIES))
def do_test(method, headers, expected_args):
given_args = []
def fake_async_update(*args):
given_args.extend(args)
headers.update({'X-Timestamp': 1,
'X-Trans-Id': '123',
'X-Backend-Storage-Policy-Index': int(policy)})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': method},
headers=headers)
with mock.patch.object(self.object_controller, 'async_update',
fake_async_update):
self.object_controller.delete_at_update(
'DELETE', 2, 'a', 'c', 'o', req, 'sda1', policy)
self.assertEqual(expected_args, given_args)
for method in ('PUT', 'POST', 'DELETE'):
expected_args = [
'DELETE', '.expiring_objects', '0000000000',
'0000000002-a/c/o', None, None,
None, HeaderKeyDict({
'X-Backend-Storage-Policy-Index': 0,
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '123',
'referer': '%s http://localhost/v1/a/c/o' % method}),
'sda1', policy]
# async_update should be called by default...
do_test(method, {}, expected_args)
do_test(method, {'X-Backend-Clean-Expiring-Object-Queue': 'true'},
expected_args)
do_test(method, {'X-Backend-Clean-Expiring-Object-Queue': 't'},
expected_args)
# ...unless header has a false value
do_test(method, {'X-Backend-Clean-Expiring-Object-Queue': 'false'},
[])
do_test(method, {'X-Backend-Clean-Expiring-Object-Queue': 'f'}, [])
def test_delete_at_negative(self):
# Test how delete_at_update works when issued a delete for old
# expiration info after a new put with no new expiration info.
        # A negative delete-at value is reset to 0.
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234', 'X-Backend-Storage-Policy-Index':
int(policy)})
self.object_controller.delete_at_update(
'DELETE', -2, 'a', 'c', 'o', req, 'sda1', policy)
self.assertEqual(given_args, [
'DELETE', '.expiring_objects', '0000000000', '0000000000-a/c/o',
None, None, None,
HeaderKeyDict({
# the expiring objects account is always 0
'X-Backend-Storage-Policy-Index': 0,
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '1234',
'referer': 'PUT http://localhost/v1/a/c/o'}),
'sda1', policy])
def test_delete_at_cap(self):
# Test how delete_at_update works when issued a delete for old
# expiration info after a new put with no new expiration info.
        # A delete-at value past the cap is reset to the cap.
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234',
'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update(
'DELETE', 12345678901, 'a', 'c', 'o', req, 'sda1', policy)
expiring_obj_container = given_args.pop(2)
expected_exp_cont = utils.get_expirer_container(
utils.normalize_delete_at_timestamp(12345678901),
86400, 'a', 'c', 'o')
self.assertEqual(expiring_obj_container, expected_exp_cont)
self.assertEqual(given_args, [
'DELETE', '.expiring_objects', '9999999999-a/c/o',
None, None, None,
HeaderKeyDict({
'X-Backend-Storage-Policy-Index': 0,
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '1234',
'referer': 'PUT http://localhost/v1/a/c/o'}),
'sda1', policy])
def test_delete_at_update_put_with_info(self):
# Keep next test,
# test_delete_at_update_put_with_info_but_missing_container, in sync
# with this one but just missing the X-Delete-At-Container header.
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234',
'X-Delete-At-Container': '0',
'X-Delete-At-Host': '127.0.0.1:1234',
'X-Delete-At-Partition': '3',
'X-Delete-At-Device': 'sdc1',
'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update('PUT', 2, 'a', 'c', 'o',
req, 'sda1', policy)
self.assertEqual(
given_args, [
'PUT', '.expiring_objects', '0000000000', '0000000002-a/c/o',
'127.0.0.1:1234',
'3', 'sdc1', HeaderKeyDict({
# the .expiring_objects account is always policy-0
'X-Backend-Storage-Policy-Index': 0,
'x-size': '0',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-content-type': 'text/plain',
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '1234',
'referer': 'PUT http://localhost/v1/a/c/o'}),
'sda1', policy])
def test_delete_at_update_put_with_info_but_missing_container(self):
# Same as previous test, test_delete_at_update_put_with_info, but just
# missing the X-Delete-At-Container header.
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
self.object_controller.logger = self.logger
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234',
'X-Delete-At-Host': '127.0.0.1:1234',
'X-Delete-At-Partition': '3',
'X-Delete-At-Device': 'sdc1',
'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update('PUT', 2, 'a', 'c', 'o',
req, 'sda1', policy)
self.assertEqual(
self.logger.get_lines_for_level('warning'),
['X-Delete-At-Container header must be specified for expiring '
'objects background PUT to work properly. Making best guess as '
'to the container name for now.'])
self.assertEqual(
given_args, [
'PUT', '.expiring_objects', '0000000000', '0000000002-a/c/o',
'127.0.0.1:1234',
'3', 'sdc1', HeaderKeyDict({
# the .expiring_objects account is always policy-0
'X-Backend-Storage-Policy-Index': 0,
'x-size': '0',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-content-type': 'text/plain',
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '1234',
'referer': 'PUT http://localhost/v1/a/c/o'}),
'sda1', policy])
def test_delete_at_update_put_with_info_but_missing_host(self):
# Same as test_delete_at_update_put_with_info, but just
# missing the X-Delete-At-Host header.
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
self.object_controller.logger = self.logger
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234',
'X-Delete-At-Container': '0',
'X-Delete-At-Partition': '3',
'X-Delete-At-Device': 'sdc1',
'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update('PUT', 2, 'a', 'c', 'o',
req, 'sda1', policy)
self.assertFalse(self.logger.get_lines_for_level('warning'))
self.assertEqual(given_args, [])
def test_delete_at_update_put_with_info_but_empty_host(self):
# Same as test_delete_at_update_put_with_info, but empty
# X-Delete-At-Host header and no X-Delete-At-Partition nor
# X-Delete-At-Device.
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
self.object_controller.logger = self.logger
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234',
'X-Delete-At-Container': '0',
'X-Delete-At-Host': '',
'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update('PUT', 2, 'a', 'c', 'o',
req, 'sda1', policy)
self.assertFalse(self.logger.get_lines_for_level('warning'))
self.assertEqual(
given_args, [
'PUT', '.expiring_objects', '0000000000', '0000000002-a/c/o',
None,
None, None, HeaderKeyDict({
# the .expiring_objects account is always policy-0
'X-Backend-Storage-Policy-Index': 0,
'x-size': '0',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-content-type': 'text/plain',
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '1234',
'referer': 'PUT http://localhost/v1/a/c/o'}),
'sda1', policy])
def test_delete_at_update_delete(self):
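        # a DELETE queues removal of the matching .expiring_objects entry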
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234',
'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update('DELETE', 2, 'a', 'c', 'o',
req, 'sda1', policy)
self.assertEqual(
given_args, [
'DELETE', '.expiring_objects', '0000000000',
'0000000002-a/c/o', None, None,
None, HeaderKeyDict({
'X-Backend-Storage-Policy-Index': 0,
'x-timestamp': utils.Timestamp('1').internal,
'x-trans-id': '1234',
'referer': 'DELETE http://localhost/v1/a/c/o'}),
'sda1', policy])
def test_delete_backend_replication(self):
# If X-Backend-Replication: True delete_at_update should completely
# short-circuit.
policy = random.choice(list(POLICIES))
given_args = []
def fake_async_update(*args):
given_args.extend(args)
self.object_controller.async_update = fake_async_update
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 1,
'X-Trans-Id': '1234',
'X-Backend-Replication': 'True',
'X-Backend-Storage-Policy-Index': int(policy)})
self.object_controller.delete_at_update(
'DELETE', -2, 'a', 'c', 'o', req, 'sda1', policy)
self.assertEqual(given_args, [])
def test_POST_calls_delete_at(self):
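        # POSTs that set or change X-Delete-At queue expirer updates: a PUT
        # for the new entry and a DELETE for any old one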
policy = random.choice(list(POLICIES))
given_args = []
def fake_delete_at_update(*args):
given_args.extend(args)
self.object_controller.delete_at_update = fake_delete_at_update
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4',
'Content-Type': 'application/octet-stream',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Object-Sysmeta-Ec-Frag-Index': 2})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(given_args, [])
sleep(.00001)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'application/x-test',
'X-Backend-Storage-Policy-Index': int(policy)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(given_args, [])
sleep(.00001)
timestamp1 = normalize_timestamp(time())
delete_at_timestamp1 = str(int(time() + 1000))
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp1,
'Content-Type': 'application/x-test',
'X-Delete-At': delete_at_timestamp1,
'X-Backend-Storage-Policy-Index': int(policy)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(
given_args, [
'PUT', int(delete_at_timestamp1), 'a', 'c', 'o',
given_args[5], 'sda1', policy])
while given_args:
given_args.pop()
sleep(.00001)
timestamp2 = normalize_timestamp(time())
delete_at_timestamp2 = str(int(time() + 2000))
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': timestamp2,
'Content-Type': 'application/x-test',
'X-Delete-At': delete_at_timestamp2,
'X-Backend-Storage-Policy-Index': int(policy)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
self.assertEqual(
given_args, [
'PUT', int(delete_at_timestamp2), 'a', 'c', 'o',
given_args[5], 'sda1', policy,
'DELETE', int(delete_at_timestamp1), 'a', 'c', 'o',
given_args[5], 'sda1', policy])
def test_PUT_calls_delete_at(self):
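        # PUTs that set or change X-Delete-At queue expirer updates: a PUT
        # for the new entry and a DELETE for any old one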
policy = random.choice(list(POLICIES))
given_args = []
def fake_delete_at_update(*args):
given_args.extend(args)
self.object_controller.delete_at_update = fake_delete_at_update
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4',
'Content-Type': 'application/octet-stream',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Object-Sysmeta-Ec-Frag-Index': 4})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(given_args, [])
sleep(.00001)
timestamp1 = normalize_timestamp(time())
delete_at_timestamp1 = str(int(time() + 1000))
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp1,
'Content-Length': '4',
'Content-Type': 'application/octet-stream',
'X-Delete-At': delete_at_timestamp1,
'X-Backend-Storage-Policy-Index': int(policy),
'X-Object-Sysmeta-Ec-Frag-Index': 3})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(
given_args, [
'PUT', int(delete_at_timestamp1), 'a', 'c', 'o',
given_args[5], 'sda1', policy])
while given_args:
given_args.pop()
sleep(.00001)
timestamp2 = normalize_timestamp(time())
delete_at_timestamp2 = str(int(time() + 2000))
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp2,
'Content-Length': '4',
'Content-Type': 'application/octet-stream',
'X-Delete-At': delete_at_timestamp2,
'X-Backend-Storage-Policy-Index': int(policy),
'X-Object-Sysmeta-Ec-Frag-Index': 3})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(
given_args, [
'PUT', int(delete_at_timestamp2), 'a', 'c', 'o',
given_args[5], 'sda1', policy,
'DELETE', int(delete_at_timestamp1), 'a', 'c', 'o',
given_args[5], 'sda1', policy])
def test_GET_but_expired(self):
# Start off with an existing object that will expire
now = time()
delete_at_timestamp = int(now + 100)
delete_at_container = str(
            delete_at_timestamp //
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(now),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# It expires in the future, so it's accessible via GET
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': normalize_timestamp(now)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# It expires in the past, so it's not accessible via GET...
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': normalize_timestamp(
delete_at_timestamp + 1)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Backend-Timestamp'],
utils.Timestamp(now))
# ...unless X-Backend-Replication is sent
expected = {
'GET': b'TEST',
'HEAD': b'',
}
for meth, expected_body in expected.items():
req = Request.blank(
'/sda1/p/a/c/o', method=meth,
headers={'X-Timestamp':
normalize_timestamp(delete_at_timestamp + 1),
'X-Backend-Replication': 'True'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(expected_body, resp.body)
def test_HEAD_but_expired(self):
# We have an object that expires in the future
now = time()
delete_at_timestamp = int(now + 100)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(now),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = b'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# It's accessible since it expires in the future
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'X-Timestamp': normalize_timestamp(now)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
# It's not accessible now since it expires in the past
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'X-Timestamp': normalize_timestamp(
delete_at_timestamp + 1)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Backend-Timestamp'],
utils.Timestamp(now))
def test_POST_but_expired(self):
now = time()
delete_at_timestamp = int(now + 100)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
# We recreate the test object every time to ensure a clean test; a
# POST may change attributes of the object, so it's not safe to
# re-use.
def recreate_test_object(when):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(when),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# You can POST to a not-yet-expired object
recreate_test_object(now)
the_time = now + 1
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(the_time)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
# You cannot POST to an expired object
now += 2
recreate_test_object(now)
the_time = delete_at_timestamp + 1
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(the_time)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
# ...unless sending an x-backend-replication header...which lets you
# modify x-delete-at
now += 2
recreate_test_object(now)
the_time = delete_at_timestamp + 2
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(the_time),
'x-backend-replication': 'true',
'x-delete-at': str(delete_at_timestamp + 100)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
# ...so the object becomes accessible again even without an
# x-backend-replication header
the_time = delete_at_timestamp + 3
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(the_time),
'x-delete-at': str(delete_at_timestamp + 101)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
def test_DELETE_can_skip_updating_expirer_queue(self):
policy = POLICIES.get_by_index(0)
test_time = time()
put_time = test_time
delete_time = test_time + 1
delete_at_timestamp = int(test_time + 10000)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(put_time),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
# Mock out async_update so we don't get any async_pending files.
with mock.patch.object(self.object_controller, 'async_update'):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
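        # A DELETE that sends X-Backend-Clean-Expiring-Object-Queue: false
        # asks the object server to skip removing the entry from the expirer
        # queue, so no async_pending update should be written for it.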
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(delete_time),
'X-Backend-Clean-Expiring-Object-Queue': 'false',
'X-If-Delete-At': str(delete_at_timestamp)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
async_pending_dir = os.path.join(
self.testdir, 'sda1', diskfile.get_async_dir(policy))
# empty dir or absent dir, either is fine
try:
self.assertEqual([], os.listdir(async_pending_dir))
except OSError as err:
self.assertEqual(err.errno, errno.ENOENT)
def test_x_if_delete_at_formats(self):
policy = POLICIES.get_by_index(0)
test_time = time()
put_time = test_time
delete_time = test_time + 1
delete_at_timestamp = int(test_time + 10000)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
def do_test(if_delete_at, expected_status):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(put_time),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
# Mock out async_update so we don't get any async_pending files.
with mock.patch.object(self.object_controller, 'async_update'):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(delete_time),
'X-Backend-Clean-Expiring-Object-Queue': 'false',
'X-If-Delete-At': if_delete_at})
# Again, we don't care about async_pending files (for this test)
with mock.patch.object(self.object_controller, 'async_update'):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, expected_status)
# Clean up the tombstone
objfile = self.df_mgr.get_diskfile('sda1', 'p', 'a', 'c', 'o',
policy=policy)
files = os.listdir(objfile._datadir)
self.assertEqual(len(files), 1,
'Expected to find one file, got %r' % files)
if expected_status == 204:
self.assertTrue(files[0].endswith('.ts'),
'Expected a tombstone, found %r' % files[0])
else:
self.assertTrue(files[0].endswith('.data'),
'Expected a data file, found %r' % files[0])
os.unlink(os.path.join(objfile._datadir, files[0]))
# More as a reminder than anything else
self.assertIsInstance(delete_at_timestamp, int)
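        # integer-seconds and Timestamp.normal forms of the matching value are
        # accepted (204); malformed or ISO-format values are rejected (400);
        # near-miss values and non-zero offsets fail the precondition (412)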
do_test(str(delete_at_timestamp), 204)
do_test(str(delete_at_timestamp) + ':', 400)
do_test(Timestamp(delete_at_timestamp).isoformat, 400)
do_test(Timestamp(delete_at_timestamp).normal, 204)
do_test(Timestamp(delete_at_timestamp, delta=1).normal, 412)
do_test(Timestamp(delete_at_timestamp, delta=-1).normal, 412)
do_test(Timestamp(delete_at_timestamp, offset=1).internal, 412)
do_test(Timestamp(delete_at_timestamp, offset=15).internal, 412)
def test_DELETE_but_expired(self):
test_time = time() + 10000
delete_at_timestamp = int(test_time + 100)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(test_time - 2000),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(
delete_at_timestamp + 1)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
def test_DELETE_if_delete_at_expired_still_deletes(self):
test_time = time() + 10
test_timestamp = normalize_timestamp(test_time)
delete_at_time = int(test_time + 10)
delete_at_timestamp = str(delete_at_time)
expired_time = delete_at_time + 1
expired_timestamp = normalize_timestamp(expired_time)
delete_at_container = str(
delete_at_time /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': test_timestamp,
'X-Delete-At': delete_at_timestamp,
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# sanity
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': test_timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, b'TEST')
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.get_data_dir(POLICIES[0]), 'p',
hash_path('a', 'c', 'o')),
utils.Timestamp(test_timestamp).internal + '.data')
self.assertTrue(os.path.isfile(objfile))
# move time past expiry
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': expired_timestamp})
resp = req.get_response(self.object_controller)
# request will 404
self.assertEqual(resp.status_int, 404)
# but file still exists
self.assertTrue(os.path.isfile(objfile))
# make the x-if-delete-at with some wrong bits
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': delete_at_timestamp,
'X-If-Delete-At': int(delete_at_time + 1)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
self.assertTrue(os.path.isfile(objfile))
# make the x-if-delete-at with all the right bits
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': delete_at_timestamp,
'X-If-Delete-At': delete_at_timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
self.assertFalse(os.path.isfile(objfile))
# make the x-if-delete-at with all the right bits (again)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': delete_at_timestamp,
'X-If-Delete-At': delete_at_timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
self.assertFalse(os.path.isfile(objfile))
# overwrite with new content
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'X-Timestamp': str(test_time + 100),
'Content-Length': '0',
'Content-Type': 'application/octet-stream'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201, resp.body)
# simulate processing a stale expirer queue entry
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': delete_at_timestamp,
'X-If-Delete-At': delete_at_timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 409)
# make the x-if-delete-at for some not found
req = Request.blank(
'/sda1/p/a/c/o-not-found',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': delete_at_timestamp,
'X-If-Delete-At': delete_at_timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
def test_DELETE_if_delete_at(self):
test_time = time() + 10000
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(test_time - 99),
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(test_time - 98)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
delete_at_timestamp = int(test_time - 1)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(test_time - 97),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(test_time - 95),
'X-If-Delete-At': str(int(test_time))})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(test_time - 95)})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
delete_at_timestamp = int(test_time - 1)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(test_time - 94),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(test_time - 92),
'X-If-Delete-At': str(int(test_time))})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 412)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(test_time - 92),
'X-If-Delete-At': delete_at_timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(test_time - 92),
'X-If-Delete-At': 'abc'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
def test_DELETE_calls_delete_at(self):
given_args = []
def fake_delete_at_update(*args):
given_args.extend(args)
self.object_controller.delete_at_update = fake_delete_at_update
timestamp1 = normalize_timestamp(time())
delete_at_timestamp1 = int(time() + 1000)
delete_at_container1 = str(
delete_at_timestamp1 /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp1,
'Content-Length': '4',
'Content-Type': 'application/octet-stream',
'X-Delete-At': str(delete_at_timestamp1),
'X-Delete-At-Container': delete_at_container1})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(given_args, [
'PUT', int(delete_at_timestamp1), 'a', 'c', 'o',
given_args[5], 'sda1', POLICIES[0]])
while given_args:
given_args.pop()
sleep(.00001)
timestamp2 = normalize_timestamp(time())
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp2,
'Content-Type': 'application/octet-stream'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(given_args, [
'DELETE', int(delete_at_timestamp1), 'a', 'c', 'o',
given_args[5], 'sda1', POLICIES[0]])
def test_PUT_can_skip_updating_expirer_queue(self):
policy = POLICIES.get_by_index(0)
test_time = time()
put_time = test_time
overwrite_time = test_time + 1
delete_at_timestamp = int(test_time + 10000)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(put_time),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
# Mock out async_update so we don't get any async_pending files.
with mock.patch.object(self.object_controller, 'async_update'):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# Overwrite with a non-expiring object
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(overwrite_time),
'X-Backend-Clean-Expiring-Object-Queue': 'false',
'Content-Length': '9',
'Content-Type': 'application/octet-stream'})
req.body = 'new stuff'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
async_pending_dir = os.path.join(
self.testdir, 'sda1', diskfile.get_async_dir(policy))
# empty dir or absent dir, either is fine
try:
self.assertEqual([], os.listdir(async_pending_dir))
except OSError as err:
self.assertEqual(err.errno, errno.ENOENT)
def test_PUT_can_skip_deleting_expirer_queue_but_still_inserts(self):
policy = POLICIES.get_by_index(0)
test_time = time()
put_time = test_time
overwrite_time = test_time + 1
delete_at_timestamp_1 = int(test_time + 10000)
delete_at_timestamp_2 = int(test_time + 20000)
delete_at_container_1 = str(
delete_at_timestamp_1 /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
delete_at_container_2 = str(
delete_at_timestamp_2 /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(put_time),
'X-Delete-At': str(delete_at_timestamp_1),
'X-Delete-At-Container': delete_at_container_1,
'X-Delete-At-Host': '1.2.3.4',
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
# Mock out async_update so we don't get any async_pending files.
with mock.patch.object(self.object_controller, 'async_update'):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# Overwrite with an expiring object
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(overwrite_time),
'X-Backend-Clean-Expiring-Object-Queue': 'false',
'X-Delete-At': str(delete_at_timestamp_2),
'X-Delete-At-Container': delete_at_container_2,
'X-Delete-At-Host': '1.2.3.4',
'Content-Length': '9',
'Content-Type': 'application/octet-stream'})
req.body = 'new stuff'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
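        # Skipping the queue cleanup only suppresses the DELETE update for the
        # old X-Delete-At entry; the PUT update for the new entry must still
        # be queued, so exactly one async_pending with op 'PUT' is expected.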
async_pendings = []
async_pending_dir = os.path.join(
self.testdir, 'sda1', diskfile.get_async_dir(policy))
for dirpath, _, filenames in os.walk(async_pending_dir):
for filename in filenames:
async_pendings.append(os.path.join(dirpath, filename))
self.assertEqual(len(async_pendings), 1)
async_pending_ops = []
for pending_file in async_pendings:
with open(pending_file, 'rb') as fh:
async_pending = pickle.load(fh)
async_pending_ops.append(async_pending['op'])
self.assertEqual(async_pending_ops, ['PUT'])
def test_PUT_delete_at_in_past(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'X-Delete-At': str(int(time() - 1)),
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
self.assertTrue(b'X-Delete-At in past' in resp.body)
def test_POST_can_skip_updating_expirer_queue(self):
policy = POLICIES.get_by_index(0)
test_time = time()
put_time = test_time
overwrite_time = test_time + 1
delete_at_timestamp = int(test_time + 10000)
delete_at_container = str(
delete_at_timestamp /
self.object_controller.expiring_objects_container_divisor *
self.object_controller.expiring_objects_container_divisor)
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(put_time),
'X-Delete-At': str(delete_at_timestamp),
'X-Delete-At-Container': delete_at_container,
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
# Mock out async_update so we don't get any async_pending files.
with mock.patch.object(self.object_controller, 'async_update'):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# POST to remove X-Delete-At
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(overwrite_time),
'X-Backend-Clean-Expiring-Object-Queue': 'false',
'X-Delete-At': ''})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
async_pending_dir = os.path.join(
self.testdir, 'sda1', diskfile.get_async_dir(policy))
# empty dir or absent dir, either is fine
try:
self.assertEqual([], os.listdir(async_pending_dir))
except OSError as err:
self.assertEqual(err.errno, errno.ENOENT)
def test_POST_delete_at_in_past(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(time() + 1),
'X-Delete-At': str(int(time() - 1))})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 400)
self.assertTrue(b'X-Delete-At in past' in resp.body)
def test_POST_delete_at_in_past_with_skewed_clock(self):
proxy_server_put_time = 1000
proxy_server_post_time = 1001
delete_at = 1050
obj_server_put_time = 1100
obj_server_post_time = 1101
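        # The object server's clock (1100/1101) is ahead of both the proxy's
        # timestamps and the requested X-Delete-At (1050); the POST should
        # still succeed because what matters is that X-Delete-At is later than
        # the request's X-Timestamp, not the object server's local clock.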
# test setup: make an object for us to POST to
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(proxy_server_put_time),
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
with mock.patch('swift.obj.server.time.time',
return_value=obj_server_put_time):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
# then POST to it
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp':
normalize_timestamp(proxy_server_post_time),
'X-Delete-At': str(delete_at)})
with mock.patch('swift.obj.server.time.time',
return_value=obj_server_post_time):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 202)
def test_REPLICATE_works(self):
def fake_get_hashes(*args, **kwargs):
return 0, {1: 2}
def my_tpool_execute(func, *args, **kwargs):
return func(*args, **kwargs)
with mock.patch.object(diskfile.DiskFileManager, '_get_hashes',
fake_get_hashes), \
mock.patch.object(tpool, 'execute', my_tpool_execute), \
mock.patch('swift.obj.diskfile.os.path.exists',
return_value=True):
req = Request.blank('/sda1/p/',
environ={'REQUEST_METHOD': 'REPLICATE'},
headers={})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
p_data = pickle.loads(resp.body)
self.assertEqual(p_data, {1: 2})
def test_REPLICATE_pickle_protocol(self):
def fake_get_hashes(*args, **kwargs):
return 0, {1: 2}
def my_tpool_execute(func, *args, **kwargs):
return func(*args, **kwargs)
with mock.patch.object(diskfile.DiskFileManager, '_get_hashes',
fake_get_hashes), \
mock.patch.object(tpool, 'execute', my_tpool_execute), \
mock.patch('swift.obj.server.pickle.dumps') as fake_pickle, \
mock.patch('swift.obj.diskfile.os.path.exists',
return_value=True):
req = Request.blank('/sda1/p/',
environ={'REQUEST_METHOD': 'REPLICATE'},
headers={})
fake_pickle.return_value = b''
req.get_response(self.object_controller)
# This is the key assertion: starting in Python 3.0, the
# default protocol version is 3, but such pickles can't be read
# on Python 2. As long as we may need to talk to a Python 2
# process, we need to cap our protocol version.
fake_pickle.assert_called_once_with({1: 2}, protocol=2)
def test_REPLICATE_timeout(self):
def fake_get_hashes(*args, **kwargs):
raise Timeout()
def my_tpool_execute(func, *args, **kwargs):
return func(*args, **kwargs)
with mock.patch.object(diskfile.DiskFileManager, '_get_hashes',
fake_get_hashes), \
mock.patch.object(tpool, 'execute', my_tpool_execute), \
mock.patch('swift.obj.diskfile.os.path.exists',
return_value=True):
req = Request.blank('/sda1/p/',
environ={'REQUEST_METHOD': 'REPLICATE'},
headers={})
self.assertRaises(Timeout, self.object_controller.REPLICATE, req)
def test_REPLICATE_reclaims_tombstones(self):
conf = {'devices': self.testdir, 'mount_check': False,
'reclaim_age': 100}
self.object_controller = object_server.ObjectController(
conf, logger=self.logger)
for policy in self.iter_policies():
# create a tombstone
ts = next(self.ts)
delete_request = Request.blank(
'/sda1/0/a/c/o', method='DELETE',
headers={
'x-backend-storage-policy-index': int(policy),
'x-timestamp': ts.internal,
})
resp = delete_request.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
objfile = self.df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o',
policy=policy)
tombstone_file = os.path.join(objfile._datadir,
'%s.ts' % ts.internal)
self.assertTrue(os.path.exists(tombstone_file))
# REPLICATE will hash it
req = Request.blank(
'/sda1/0', method='REPLICATE',
headers={
'x-backend-storage-policy-index': int(policy),
})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
suffixes = list(pickle.loads(resp.body).keys())
self.assertEqual(1, len(suffixes),
'Expected just one suffix; got %r' % (suffixes,))
suffix = suffixes[0]
self.assertEqual(suffix, os.path.basename(
os.path.dirname(objfile._datadir)))
# tombstone still exists
self.assertTrue(os.path.exists(tombstone_file))
# after reclaim REPLICATE will mark invalid (but NOT rehash!)
replicate_request = Request.blank(
'/sda1/0/%s' % suffix, method='REPLICATE',
headers={
'x-backend-storage-policy-index': int(policy),
})
with mock.patch('swift.obj.diskfile.time.time',
return_value=time() + 200):
resp = replicate_request.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(None, pickle.loads(resp.body))
# no rehash means tombstone still exists...
self.assertTrue(os.path.exists(tombstone_file))
# but at some point (like the next pre-sync REPLICATE) it rehashes
replicate_request = Request.blank(
'/sda1/0/', method='REPLICATE',
headers={
'x-backend-storage-policy-index': int(policy),
})
with mock.patch('swift.obj.diskfile.time.time',
return_value=time() + 200):
resp = replicate_request.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual({}, pickle.loads(resp.body))
# and tombstone is reaped!
self.assertFalse(os.path.exists(tombstone_file))
# N.B. with a small reclaim age like this - if proxy clocks get far
# enough out of whack ...
with mock.patch('swift.obj.diskfile.time.time',
return_value=time() + 200):
resp = delete_request.get_response(self.object_controller)
# we won't even create the tombstone
self.assertFalse(os.path.exists(tombstone_file))
# hashdir's empty, so it gets cleaned up
self.assertFalse(os.path.exists(objfile._datadir))
def test_SSYNC_can_be_called(self):
req = Request.blank('/sda1/0',
environ={'REQUEST_METHOD': 'SSYNC'},
headers={})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual('True',
resp.headers.get('X-Backend-Accept-No-Commit'))
def test_PUT_with_full_drive(self):
class IgnoredBody(object):
def __init__(self):
self.read_called = False
def read(self, size=-1):
if not self.read_called:
self.read_called = True
return b'VERIFY'
return b''
def fake_fallocate(fd, size):
raise OSError(errno.ENOSPC, os.strerror(errno.ENOSPC))
orig_fallocate = diskfile.fallocate
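        # With fallocate raising ENOSPC the server should answer 507 at the
        # expect/allocate stage, before ever reading the request body.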
try:
diskfile.fallocate = fake_fallocate
timestamp = normalize_timestamp(time())
body_reader = IgnoredBody()
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': body_reader},
headers={'X-Timestamp': timestamp,
'Content-Length': '6',
'Content-Type': 'application/octet-stream',
'Expect': '100-continue'})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 507)
self.assertFalse(body_reader.read_called)
finally:
diskfile.fallocate = orig_fallocate
def test_global_conf_callback_does_nothing(self):
preloaded_app_conf = {}
global_conf = {}
object_server.global_conf_callback(preloaded_app_conf, global_conf)
self.assertEqual(preloaded_app_conf, {})
self.assertEqual(list(global_conf.keys()), ['replication_semaphore'])
try:
value = global_conf['replication_semaphore'][0].get_value()
except NotImplementedError:
# On some operating systems (at a minimum, OS X) it's not possible
# to introspect the value of a semaphore
raise unittest.SkipTest
else:
self.assertEqual(value, 4)
def test_global_conf_callback_replication_semaphore(self):
preloaded_app_conf = {'replication_concurrency': 123}
global_conf = {}
with mock.patch.object(
object_server.multiprocessing, 'BoundedSemaphore',
return_value='test1') as mocked_Semaphore:
object_server.global_conf_callback(preloaded_app_conf, global_conf)
self.assertEqual(preloaded_app_conf, {'replication_concurrency': 123})
self.assertEqual(global_conf, {'replication_semaphore': ['test1']})
mocked_Semaphore.assert_called_once_with(123)
def test_handling_of_replication_semaphore_config(self):
conf = {'devices': self.testdir, 'mount_check': 'false'}
objsrv = object_server.ObjectController(conf)
self.assertTrue(objsrv.replication_semaphore is None)
conf['replication_semaphore'] = ['sema']
objsrv = object_server.ObjectController(conf)
self.assertEqual(objsrv.replication_semaphore, 'sema')
def test_serv_reserv(self):
# Test replication_server flag was set from configuration file.
conf = {'devices': self.testdir, 'mount_check': 'false'}
self.assertTrue(
object_server.ObjectController(conf).replication_server)
for val in [True, '1', 'True', 'true']:
conf['replication_server'] = val
self.assertTrue(
object_server.ObjectController(conf).replication_server)
for val in [False, 0, '0', 'False', 'false', 'test_string']:
conf['replication_server'] = val
self.assertFalse(
object_server.ObjectController(conf).replication_server)
def test_list_allowed_methods(self):
# Test list of allowed_methods
obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST']
repl_methods = ['REPLICATE', 'SSYNC']
for method_name in obj_methods:
method = getattr(self.object_controller, method_name)
self.assertFalse(hasattr(method, 'replication'))
for method_name in repl_methods:
method = getattr(self.object_controller, method_name)
self.assertEqual(method.replication, True)
def test_correct_allowed_method(self):
# Test correct work for allowed method using
# swift.obj.server.ObjectController.__call__
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.object_controller = object_server.app_factory(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'})
def start_response(*args):
# Sends args to outbuf
outbuf.write(args[0])
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
method_res = mock.MagicMock()
mock_method = public(lambda x:
mock.MagicMock(return_value=method_res))
with mock.patch.object(self.object_controller, method,
new=mock_method):
response = self.object_controller(env, start_response)
self.assertEqual(response, method_res)
def test_not_allowed_method(self):
# Test correct work for NOT allowed method using
# swift.obj.server.ObjectController.__call__
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.object_controller = object_server.ObjectController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'}, logger=self.logger)
def start_response(*args):
# Sends args to outbuf
outbuf.write(args[0])
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
answer = [b'<html><h1>Method Not Allowed</h1><p>The method is not '
b'allowed for this resource.</p></html>']
mock_method = replication(public(lambda x: mock.MagicMock()))
with mock.patch.object(self.object_controller, method,
new=mock_method):
mock_method.replication = True
with mock.patch('time.time',
mock.MagicMock(side_effect=[10000.0,
10001.0, 10001.0])):
with mock.patch('os.getpid',
mock.MagicMock(return_value=1234)):
response = self.object_controller.__call__(
env, start_response)
self.assertEqual(response, answer)
self.assertEqual(
self.logger.get_lines_for_level('info'),
['- - - [01/Jan/1970:02:46:41 +0000] "PUT'
' /sda1/p/a/c/o" 405 91 "-" "-" "-" 1.0000 "-"'
' 1234 -'])
def test_replication_server_call_all_methods(self):
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.object_controller = object_server.ObjectController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'true'}, logger=debug_logger())
def start_response(*args):
"""Sends args to outbuf"""
outbuf.write(args[0])
obj_methods = ['PUT', 'HEAD', 'GET', 'POST', 'DELETE', 'OPTIONS']
for method in obj_methods:
env = {'REQUEST_METHOD': method,
'HTTP_X_TIMESTAMP': next(self.ts).internal,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_TYPE': 'text/plain',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
self.object_controller(env, start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertIn(outbuf.getvalue()[:4], ('201 ', '204 ', '200 '))
def test_create_reserved_namespace_object(self):
path = '/sda1/p/a/%sc/%so' % (utils.RESERVED_STR, utils.RESERVED_STR)
req = Request.blank(path, method='PUT', headers={
'X-Timestamp': next(self.ts).internal,
'Content-Type': 'application/x-test',
'Content-Length': 0,
})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status, '201 Created')
def test_create_reserved_namespace_object_in_user_container(self):
path = '/sda1/p/a/c/%so' % utils.RESERVED_STR
req = Request.blank(path, method='PUT', headers={
'X-Timestamp': next(self.ts).internal,
'Content-Type': 'application/x-test',
'Content-Length': 0,
})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status, '400 Bad Request', resp.body)
self.assertEqual(resp.body, b'Invalid reserved-namespace object in '
b'user-namespace container')
def test_other_methods_reserved_namespace_object(self):
container = get_reserved_name('c')
obj = get_reserved_name('o', 'v1')
path = '/sda1/p/a/%s/%s' % (container, obj)
req = Request.blank(path, method='PUT', headers={
'X-Timestamp': next(self.ts).internal,
'Content-Type': 'application/x-test',
'Content-Length': 0,
})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status, '201 Created')
bad_req = Request.blank('/sda1/p/a/c/%s' % obj, method='PUT', headers={
'X-Timestamp': next(self.ts).internal})
resp = bad_req.get_response(self.object_controller)
self.assertEqual(resp.status, '400 Bad Request')
self.assertEqual(resp.body, b'Invalid reserved-namespace object '
b'in user-namespace container')
for method in ('GET', 'POST', 'DELETE'):
req.method = method
req.headers['X-Timestamp'] = next(self.ts).internal
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int // 100, 2)
            bad_req.method = method
            bad_req.headers['X-Timestamp'] = next(self.ts).internal
resp = bad_req.get_response(self.object_controller)
self.assertEqual(resp.status, '400 Bad Request')
self.assertEqual(resp.body, b'Invalid reserved-namespace object '
b'in user-namespace container')
def test_not_utf8_and_not_logging_requests(self):
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.object_controller = object_server.ObjectController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false', 'log_requests': 'false'},
logger=debug_logger())
def start_response(*args):
# Sends args to outbuf
outbuf.write(args[0])
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/\xd8\x3e%20/%',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
answer = [b'Invalid UTF8 or contains NULL']
mock_method = public(lambda x: mock.MagicMock())
with mock.patch.object(self.object_controller, method,
new=mock_method):
response = self.object_controller.__call__(env, start_response)
self.assertEqual(response, answer)
self.assertEqual(self.logger.get_lines_for_level('info'), [])
def test__call__returns_500(self):
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.logger = debug_logger('test')
self.object_controller = object_server.ObjectController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false', 'log_requests': 'false'},
logger=self.logger)
def start_response(*args):
# Sends args to outbuf
outbuf.write(args[0])
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
@public
def mock_put_method(*args, **kwargs):
raise Exception()
with mock.patch.object(self.object_controller, method,
new=mock_put_method):
response = self.object_controller.__call__(env, start_response)
self.assertTrue(response[0].startswith(
b'Traceback (most recent call last):'))
self.assertEqual(self.logger.get_lines_for_level('error'), [
'ERROR __call__ error with %(method)s %(path)s : ' % {
'method': 'PUT', 'path': '/sda1/p/a/c/o'},
])
self.assertEqual(self.logger.get_lines_for_level('info'), [])
def test_PUT_slow(self):
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.object_controller = object_server.ObjectController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false', 'log_requests': 'false',
'slow': '10'},
logger=self.logger)
def start_response(*args):
# Sends args to outbuf
outbuf.write(args[0])
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c/o',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
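        # 'slow': '10' pads request handling to at least 10 seconds; with
        # time.time mocked to report 1 second elapsed (10000.0 -> 10001.0),
        # the handler is expected to sleep for the remaining 9 seconds.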
mock_method = public(lambda x: mock.MagicMock())
with mock.patch.object(self.object_controller, method,
new=mock_method):
with mock.patch('time.time',
mock.MagicMock(side_effect=[10000.0,
10001.0])):
with mock.patch('swift.obj.server.sleep',
mock.MagicMock()) as ms:
self.object_controller.__call__(env, start_response)
ms.assert_called_with(9)
self.assertEqual(self.logger.get_lines_for_level('info'),
[])
def test_log_line_format(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
self.object_controller.logger = self.logger
with mock.patch('time.time',
side_effect=[10000.0, 10000.0, 10001.0, 10002.0,
10002.0, 10002.0]), \
mock.patch('os.getpid', return_value=1234):
req.get_response(self.object_controller)
self.assertEqual(
self.logger.get_lines_for_level('info'),
['1.2.3.4 - - [01/Jan/1970:02:46:42 +0000] "HEAD /sda1/p/a/c/o" '
'404 - "-" "-" "-" 2.0000 "-" 1234 -'])
@patch_policies([StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False)])
def test_dynamic_datadir(self):
# update router post patch
self.object_controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.object_controller.logger)
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test',
'Foo': 'fooheader',
'Baz': 'bazheader',
'X-Backend-Storage-Policy-Index': 1,
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = 'VERIFY'
object_dir = self.testdir + "/sda1/objects-1"
self.assertFalse(os.path.isdir(object_dir))
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertTrue(os.path.isdir(object_dir))
# make sure no idx in header uses policy 0 data_dir
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test',
'Foo': 'fooheader',
'Baz': 'bazheader',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = 'VERIFY'
object_dir = self.testdir + "/sda1/objects"
self.assertFalse(os.path.isdir(object_dir))
with mock.patch.object(POLICIES, 'get_by_index',
lambda _: True):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
self.assertTrue(os.path.isdir(object_dir))
def test_storage_policy_index_is_validated(self):
# sanity check that index for existing policy is ok
methods = ('PUT', 'POST', 'GET', 'HEAD', 'REPLICATE', 'DELETE')
valid_indices = sorted([int(policy) for policy in POLICIES])
for index in valid_indices:
object_dir = self.testdir + "/sda1/objects"
if index > 0:
object_dir = "%s-%s" % (object_dir, index)
self.assertFalse(os.path.isdir(object_dir))
for method in methods:
headers = {
'X-Timestamp': next(self.ts).internal,
'Content-Type': 'application/x-test',
'X-Backend-Storage-Policy-Index': index}
if POLICIES[index].policy_type == EC_POLICY:
headers['X-Object-Sysmeta-Ec-Frag-Index'] = '2'
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': method},
headers=headers)
req.body = 'VERIFY'
resp = req.get_response(self.object_controller)
self.assertTrue(is_success(resp.status_int),
'%s method failed: %r' % (method, resp.status))
# index for non-existent policy should return 503
index = valid_indices[-1] + 1
for method in methods:
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': method},
headers={
'X-Timestamp': next(self.ts).internal,
'Content-Type': 'application/x-test',
'X-Backend-Storage-Policy-Index': index})
req.body = 'VERIFY'
object_dir = self.testdir + "/sda1/objects-%s" % index
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 503)
self.assertFalse(os.path.isdir(object_dir))
def test_race_doesnt_quarantine(self):
existing_timestamp = normalize_timestamp(time())
delete_timestamp = normalize_timestamp(time() + 1)
put_timestamp = normalize_timestamp(time() + 2)
head_timestamp = normalize_timestamp(time() + 3)
# make a .ts
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': existing_timestamp})
req.get_response(self.object_controller)
# force a PUT between the listdir and read_metadata of a DELETE
put_once = [False]
orig_listdir = os.listdir
def mock_listdir(path):
listing = orig_listdir(path)
if not put_once[0]:
put_once[0] = True
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': put_timestamp,
'Content-Length': '9',
'Content-Type': 'application/octet-stream'})
req.body = 'some data'
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 201)
return listing
with mock.patch('os.listdir', mock_listdir):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': delete_timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
qdir = os.path.join(self.testdir, 'sda1', 'quarantined')
self.assertFalse(os.path.exists(qdir))
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'X-Timestamp': head_timestamp})
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Timestamp'], put_timestamp)
def test_multiphase_put_draining(self):
# We want to ensure that we read the whole response body even if
# it's multipart MIME and there's document parts that we don't
# expect or understand. This'll help save our bacon if we ever jam
# more stuff in there.
in_a_timeout = [False]
# inherit from BaseException so we get a stack trace when the test
# fails instead of just a 500
class NotInATimeout(BaseException):
pass
class FakeTimeout(BaseException):
def __enter__(self):
in_a_timeout[0] = True
def __exit__(self, typ, value, tb):
in_a_timeout[0] = False
class PickyWsgiBytesIO(WsgiBytesIO):
def read(self, *a, **kw):
if not in_a_timeout[0]:
raise NotInATimeout()
return WsgiBytesIO.read(self, *a, **kw)
def readline(self, *a, **kw):
if not in_a_timeout[0]:
raise NotInATimeout()
return WsgiBytesIO.readline(self, *a, **kw)
test_data = b'obj data'
footer_meta = {
"X-Object-Sysmeta-Ec-Frag-Index": "7",
"Etag": md5(test_data, usedforsecurity=False).hexdigest(),
}
footer_json = json.dumps(footer_meta).encode('ascii')
footer_meta_cksum = md5(
footer_json, usedforsecurity=False).hexdigest().encode('ascii')
test_doc = b"\r\n".join((
b"--boundary123",
b"X-Document: object body",
b"",
test_data,
b"--boundary123",
b"X-Document: object metadata",
b"Content-MD5: " + footer_meta_cksum,
b"",
footer_json,
b"--boundary123",
b"X-Document: we got cleverer",
b"",
b"stuff stuff meaningless stuuuuuuuuuuff",
b"--boundary123",
b"X-Document: we got even cleverer; can you believe it?",
b"Waneshaft: ambifacient lunar",
b"Casing: malleable logarithmic",
b"",
b"potato potato potato potato potato potato potato",
b"--boundary123--"
))
# phase1 - PUT request with object metadata in footer and
# multiphase commit conversation
put_timestamp = utils.Timestamp.now().internal
headers = {
'Content-Type': 'text/plain',
'X-Timestamp': put_timestamp,
'Transfer-Encoding': 'chunked',
'Expect': '100-continue',
'X-Backend-Storage-Policy-Index': '1',
'X-Backend-Obj-Content-Length': len(test_data),
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
}
wsgi_input = PickyWsgiBytesIO(test_doc)
req = Request.blank(
"/sda1/0/a/c/o",
environ={'REQUEST_METHOD': 'PUT', 'wsgi.input': wsgi_input},
headers=headers)
app = object_server.ObjectController(self.conf, logger=self.logger)
with mock.patch('swift.obj.server.ChunkReadTimeout', FakeTimeout):
resp = req.get_response(app)
self.assertEqual(resp.status_int, 201) # sanity check
in_a_timeout[0] = True # so we can check without an exception
self.assertEqual(wsgi_input.read(), b'') # we read all the bytes
@patch_policies(test_policies)
class TestObjectServer(unittest.TestCase):
def setUp(self):
skip_if_no_xattrs()
# dirs
self.tmpdir = mkdtemp()
self.tempdir = os.path.join(self.tmpdir, 'tmp_test_obj_server')
self.devices = os.path.join(self.tempdir, 'srv/node')
for device in ('sda1', 'sdb1'):
os.makedirs(os.path.join(self.devices, device))
self.conf = {
'devices': self.devices,
'swift_dir': self.tempdir,
'mount_check': 'false',
# hopefully 1s is long enough to improve gate reliability?
'client_timeout': 1,
}
self.logger = debug_logger('test-object-server')
self.app = object_server.ObjectController(
self.conf, logger=self.logger)
sock = listen_zero()
self.server = spawn(wsgi.server, sock, self.app, utils.NullLogger())
self.port = sock.getsockname()[1]
def tearDown(self):
rmtree(self.tmpdir)
def test_not_found(self):
conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
'GET', '/a/c/o')
resp = conn.getresponse()
self.assertEqual(resp.status, 404)
resp.read()
resp.close()
def test_expect_on_put(self):
test_body = b'test'
headers = {
'Expect': '100-continue',
'Content-Length': len(test_body),
'Content-Type': 'application/test',
'X-Timestamp': utils.Timestamp.now().internal,
}
conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
'PUT', '/a/c/o', headers=headers)
resp = conn.getexpect()
self.assertEqual(resp.status, 100)
conn.send(test_body)
resp = conn.getresponse()
self.assertEqual(resp.status, 201)
resp.read()
resp.close()
def test_expect_on_put_footer(self):
test_body = b'test'
headers = {
'Expect': '100-continue',
'Content-Length': len(test_body),
'Content-Type': 'application/test',
'X-Timestamp': utils.Timestamp.now().internal,
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
}
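        # When the PUT advertises a metadata footer, the 100 Continue response
        # should echo X-Obj-Metadata-Footer: yes back to the sender.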
conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
'PUT', '/a/c/o', headers=headers)
resp = conn.getexpect()
self.assertEqual(resp.status, 100)
headers = HeaderKeyDict(resp.getheaders())
self.assertEqual(headers['X-Obj-Metadata-Footer'], 'yes')
resp.close()
def test_expect_on_put_conflict(self):
test_body = b'test'
put_timestamp = utils.Timestamp.now()
headers = {
'Expect': '100-continue',
'Content-Length': len(test_body),
'Content-Type': 'application/test',
'X-Timestamp': put_timestamp.internal,
}
conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
'PUT', '/a/c/o', headers=headers)
resp = conn.getexpect()
self.assertEqual(resp.status, 100)
conn.send(test_body)
resp = conn.getresponse()
self.assertEqual(resp.status, 201)
resp.read()
resp.close()
# and again with same timestamp
conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
'PUT', '/a/c/o', headers=headers)
resp = conn.getexpect()
self.assertEqual(resp.status, 409)
headers = HeaderKeyDict(resp.getheaders())
self.assertEqual(headers['X-Backend-Timestamp'], put_timestamp)
resp.read()
resp.close()
def test_multiphase_put_no_mime_boundary(self):
test_data = b'obj data'
put_timestamp = utils.Timestamp.now().internal
headers = {
'Content-Type': 'text/plain',
'X-Timestamp': put_timestamp,
'Transfer-Encoding': 'chunked',
'Expect': '100-continue',
'X-Backend-Obj-Content-Length': len(test_data),
'X-Backend-Obj-Multiphase-Commit': 'yes',
}
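        # Multiphase commit was requested without a MIME boundary, so the
        # server should reject the request with a 400 at the expect stage.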
conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
'PUT', '/a/c/o', headers=headers)
resp = conn.getexpect()
self.assertEqual(resp.status, 400)
resp.read()
resp.close()
    def test_expect_on_multiphase_put_disconnect(self):
put_timestamp = utils.Timestamp.now().internal
headers = {
'Content-Type': 'text/plain',
'X-Timestamp': put_timestamp,
'Transfer-Encoding': 'chunked',
'Expect': '100-continue',
'X-Backend-Obj-Content-Length': 0,
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
'X-Backend-Obj-Multiphase-Commit': 'yes',
}
conn = bufferedhttp.http_connect('127.0.0.1', self.port, 'sda1', '0',
'PUT', '/a/c/o', headers=headers)
resp = conn.getexpect()
self.assertEqual(resp.status, 100)
headers = HeaderKeyDict(resp.getheaders())
self.assertEqual(headers['X-Obj-Multiphase-Commit'], 'yes')
conn.send(b'c\r\n--boundary123\r\n')
# disconnect client
if six.PY2:
conn.sock.fd._sock.close()
else:
conn.sock.fd._real_close()
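        # yield to the hub a couple of times so the server greenthread can
        # notice the disconnect and finish logging the 499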
for i in range(2):
sleep(0)
self.assertFalse(self.logger.get_lines_for_level('error'))
for line in self.logger.get_lines_for_level('info'):
self.assertIn(' 499 ', line)
def find_files(self):
ignore_files = {'.lock', 'hashes.invalid'}
found_files = defaultdict(list)
for root, dirs, files in os.walk(self.devices):
for filename in files:
if filename in ignore_files:
continue
_name, ext = os.path.splitext(filename)
file_path = os.path.join(root, filename)
found_files[ext].append(file_path)
return found_files
@contextmanager
def _check_multiphase_put_commit_handling(self,
test_doc=None,
headers=None,
finish_body=True):
"""
        This helper will set up a multiphase chunked PUT request and yield
        the context at the commit phase (after getting the second expect-100
        continue response).
        It sets up a reasonable stub request, but you can override some
        characteristics of the request via kwargs.
        :param test_doc: first part of the mime conversation before the object
                         server will send the 100-continue; this includes the
                         object body
        :param headers: headers to send along with the initial request; some
                        object-metadata (e.g. X-Backend-Obj-Content-Length)
                        is generally expected to match the test_doc
        :param finish_body: boolean, if true send "0\r\n\r\n" after test_doc
                            and wait for 100-continue before yielding context
"""
test_data = encode_frag_archive_bodies(POLICIES[1], b'obj data')[0]
footer_meta = {
"X-Object-Sysmeta-Ec-Frag-Index": "2",
"Etag": md5(test_data, usedforsecurity=False).hexdigest(),
}
footer_json = json.dumps(footer_meta).encode('ascii')
footer_meta_cksum = md5(
footer_json, usedforsecurity=False).hexdigest().encode('ascii')
test_doc = test_doc or b"\r\n".join((
b"--boundary123",
b"X-Document: object body",
b"",
test_data,
b"--boundary123",
b"X-Document: object metadata",
b"Content-MD5: " + footer_meta_cksum,
b"",
footer_json,
b"--boundary123",
))
# phase1 - PUT request with object metadata in footer and
# multiphase commit conversation
headers = headers or {
'Content-Type': 'text/plain',
'Transfer-Encoding': 'chunked',
'Expect': '100-continue',
'X-Backend-Storage-Policy-Index': '1',
'X-Backend-Obj-Content-Length': len(test_data),
'X-Backend-Obj-Metadata-Footer': 'yes',
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
'X-Backend-Obj-Multiphase-Commit': 'yes',
}
put_timestamp = utils.Timestamp(headers.setdefault(
'X-Timestamp', utils.Timestamp.now().internal))
container_update = \
'swift.obj.server.ObjectController.container_update'
with mock.patch(container_update) as _container_update:
conn = bufferedhttp.http_connect(
'127.0.0.1', self.port, 'sda1', '0',
'PUT', '/a/c/o', headers=headers)
resp = conn.getexpect()
self.assertEqual(resp.status, 100)
expect_headers = HeaderKeyDict(resp.getheaders())
to_send = b"%x\r\n%s\r\n" % (len(test_doc), test_doc)
conn.send(to_send)
if finish_body:
conn.send(b"0\r\n\r\n")
# verify 100-continue response to mark end of phase1
resp = conn.getexpect()
self.assertEqual(resp.status, 100)
# yield relevant context for test
yield {
'conn': conn,
'expect_headers': expect_headers,
'put_timestamp': put_timestamp,
'mock_container_update': _container_update,
}
# give the object server a little time to trampoline enough to
# recognize request has finished, or socket has closed or whatever
sleep(0.01)
def test_multiphase_put_client_disconnect_right_before_commit(self):
with self._check_multiphase_put_commit_handling() as context:
conn = context['conn']
# just bail straight out
if six.PY2:
conn.sock.fd._sock.close()
else:
conn.sock.fd._real_close()
sleep(0)
put_timestamp = context['put_timestamp']
_container_update = context['mock_container_update']
# and make sure it demonstrates the client disconnect
log_lines = self.logger.get_lines_for_level('info')
self.assertEqual(len(log_lines), 1)
self.assertIn(' 499 ', log_lines[0])
# verify successful object data file write
found_files = self.find_files()
# non durable .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s#2.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
# but no other files
self.assertFalse(found_files['.data'][1:])
found_files.pop('.data')
self.assertFalse(found_files)
# And no container update
self.assertFalse(_container_update.called)
def test_multiphase_put_client_disconnect_in_the_middle_of_commit(self):
with self._check_multiphase_put_commit_handling() as context:
conn = context['conn']
# start commit confirmation to start phase2
commit_confirmation_doc = b"\r\n".join((
b"X-Document: put commit",
b"",
b"commit_confirmation",
b"--boundary123--",
))
            # but don't quite finish sending the commit body
to_send = b"%x\r\n%s" % \
(len(commit_confirmation_doc), commit_confirmation_doc[:-1])
conn.send(to_send)
# and then bail out
if six.PY2:
conn.sock.fd._sock.close()
else:
conn.sock.fd._real_close()
sleep(0)
put_timestamp = context['put_timestamp']
_container_update = context['mock_container_update']
# and make sure it demonstrates the client disconnect
log_lines = self.logger.get_lines_for_level('info')
self.assertEqual(len(log_lines), 1)
self.assertIn(' 499 ', log_lines[0])
# verify successful object data file write
found_files = self.find_files()
# non durable .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s#2.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
# but no other files
self.assertFalse(found_files['.data'][1:])
found_files.pop('.data')
self.assertFalse(found_files)
# And no container update
self.assertFalse(_container_update.called)
def test_multiphase_put_no_metadata_replicated(self):
test_data = b'obj data'
test_doc = b"\r\n".join((
b"--boundary123",
b"X-Document: object body",
b"",
test_data,
b"--boundary123",
))
put_timestamp = utils.Timestamp.now().internal
headers = {
'Content-Type': 'text/plain',
'X-Timestamp': put_timestamp,
'Transfer-Encoding': 'chunked',
'Expect': '100-continue',
'X-Backend-Obj-Content-Length': len(test_data),
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
'X-Backend-Obj-Multiphase-Commit': 'yes',
}
with self._check_multiphase_put_commit_handling(
test_doc=test_doc, headers=headers) as context:
expect_headers = context['expect_headers']
self.assertEqual(expect_headers['X-Obj-Multiphase-Commit'], 'yes')
# N.B. no X-Obj-Metadata-Footer header
self.assertNotIn('X-Obj-Metadata-Footer', expect_headers)
conn = context['conn']
# send commit confirmation to start phase2
commit_confirmation_doc = b"\r\n".join((
b"X-Document: put commit",
b"",
b"commit_confirmation",
b"--boundary123--",
))
to_send = b"%x\r\n%s\r\n0\r\n\r\n" % \
(len(commit_confirmation_doc), commit_confirmation_doc)
conn.send(to_send)
            # verify success (2xx) to mark end of phase2
resp = conn.getresponse()
self.assertEqual(resp.status, 201)
resp.read()
resp.close()
# verify successful object data file write
put_timestamp = context['put_timestamp']
found_files = self.find_files()
# .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
# but no other files
self.assertFalse(found_files['.data'][1:])
found_files.pop('.data')
self.assertFalse(found_files)
# And container update was called
self.assertTrue(context['mock_container_update'].called)
def test_multiphase_put_metadata_footer(self):
with self._check_multiphase_put_commit_handling() as context:
expect_headers = context['expect_headers']
self.assertEqual(expect_headers['X-Obj-Multiphase-Commit'], 'yes')
self.assertEqual(expect_headers['X-Obj-Metadata-Footer'], 'yes')
conn = context['conn']
# send commit confirmation to start phase2
commit_confirmation_doc = b"\r\n".join((
b"X-Document: put commit",
b"",
b"commit_confirmation",
b"--boundary123--",
))
to_send = b"%x\r\n%s\r\n0\r\n\r\n" % \
(len(commit_confirmation_doc), commit_confirmation_doc)
conn.send(to_send)
            # verify success (2xx) to mark end of phase2
resp = conn.getresponse()
self.assertEqual(resp.status, 201)
resp.read()
resp.close()
# verify successful object data and durable state file write
put_timestamp = context['put_timestamp']
found_files = self.find_files()
# .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s#2#d.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
with open(obj_datafile) as fd:
actual_meta = diskfile.read_metadata(fd)
expected_meta = {'Content-Length': '82',
'name': '/a/c/o',
'X-Object-Sysmeta-Ec-Frag-Index': '2',
'X-Timestamp': put_timestamp.normal,
'Content-Type': 'text/plain'}
for k, v in actual_meta.items():
# See diskfile.py:_decode_metadata
if six.PY2:
self.assertIsInstance(k, six.binary_type)
self.assertIsInstance(v, six.binary_type)
else:
self.assertIsInstance(k, six.text_type)
self.assertIsInstance(v, six.text_type)
self.assertIsNotNone(actual_meta.pop('ETag', None))
self.assertEqual(expected_meta, actual_meta)
# but no other files
self.assertFalse(found_files['.data'][1:])
found_files.pop('.data')
self.assertFalse(found_files)
# And container update was called
self.assertTrue(context['mock_container_update'].called)
def test_multiphase_put_metadata_footer_disconnect(self):
test_data = b'obj data'
test_doc = b"\r\n".join((
b"--boundary123",
b"X-Document: object body",
b"",
test_data,
b"--boundary123",
))
# eventlet.wsgi won't return < network_chunk_size from a chunked read
self.app.network_chunk_size = 16
with self._check_multiphase_put_commit_handling(
test_doc=test_doc, finish_body=False) as context:
conn = context['conn']
# make footer doc
footer_meta = {
"X-Object-Sysmeta-Ec-Frag-Index": "2",
"Etag": md5(test_data, usedforsecurity=False).hexdigest(),
}
footer_json = json.dumps(footer_meta).encode('ascii')
footer_meta_cksum = md5(
footer_json, usedforsecurity=False).hexdigest().encode('ascii')
# send most of the footer doc
footer_doc = b"\r\n".join((
b"X-Document: object metadata",
b"Content-MD5: " + footer_meta_cksum,
b"",
footer_json,
))
# but don't send final boundary nor last chunk
to_send = b"%x\r\n%s\r\n" % \
(len(footer_doc), footer_doc)
conn.send(to_send)
# and then bail out
if six.PY2:
conn.sock.fd._sock.close()
else:
conn.sock.fd._real_close()
sleep(0)
# and make sure it demonstrates the client disconnect
log_lines = self.logger.get_lines_for_level('info')
self.assertEqual(len(log_lines), 1)
self.assertIn(' 499 ', log_lines[0])
# no artifacts left on disk
found_files = self.find_files()
self.assertFalse(found_files)
# ... and no container update
_container_update = context['mock_container_update']
self.assertFalse(_container_update.called)
def test_multiphase_put_ec_fragment_in_headers_no_footers(self):
test_data = b'obj data'
test_doc = b"\r\n".join((
b"--boundary123",
b"X-Document: object body",
b"",
test_data,
b"--boundary123",
))
# phase1 - PUT request with multiphase commit conversation
# no object metadata in footer
put_timestamp = utils.Timestamp.now().internal
headers = {
'Content-Type': 'text/plain',
'X-Timestamp': put_timestamp,
'Transfer-Encoding': 'chunked',
'Expect': '100-continue',
# normally the frag index gets sent in the MIME footer (which this
# test doesn't have, see `test_multiphase_put_metadata_footer`),
# but the proxy *could* send the frag index in the headers and
# this test verifies that would work.
'X-Object-Sysmeta-Ec-Frag-Index': '2',
'X-Backend-Storage-Policy-Index': '1',
'X-Backend-Obj-Content-Length': len(test_data),
'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary123',
'X-Backend-Obj-Multiphase-Commit': 'yes',
}
with self._check_multiphase_put_commit_handling(
test_doc=test_doc, headers=headers) as context:
expect_headers = context['expect_headers']
self.assertEqual(expect_headers['X-Obj-Multiphase-Commit'], 'yes')
# N.B. no X-Obj-Metadata-Footer header
self.assertNotIn('X-Obj-Metadata-Footer', expect_headers)
conn = context['conn']
# send commit confirmation to start phase2
commit_confirmation_doc = b"\r\n".join((
b"X-Document: put commit",
b"",
b"commit_confirmation",
b"--boundary123--",
))
to_send = b"%x\r\n%s\r\n0\r\n\r\n" % \
(len(commit_confirmation_doc), commit_confirmation_doc)
conn.send(to_send)
            # verify success (2xx) to mark end of phase2
resp = conn.getresponse()
self.assertEqual(resp.status, 201)
resp.read()
resp.close()
# verify successful object data and durable state file write
put_timestamp = context['put_timestamp']
found_files = self.find_files()
# .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s#2#d.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
# but no other files
self.assertFalse(found_files['.data'][1:])
found_files.pop('.data')
self.assertFalse(found_files)
# And container update was called
self.assertTrue(context['mock_container_update'].called)
def test_multiphase_put_bad_commit_message(self):
with self._check_multiphase_put_commit_handling() as context:
conn = context['conn']
# send commit confirmation to start phase2
commit_confirmation_doc = b"\r\n".join((
b"junkjunk",
b"--boundary123--",
))
to_send = b"%x\r\n%s\r\n0\r\n\r\n" % \
(len(commit_confirmation_doc), commit_confirmation_doc)
conn.send(to_send)
resp = conn.getresponse()
self.assertEqual(resp.status, 500)
resp.read()
resp.close()
put_timestamp = context['put_timestamp']
_container_update = context['mock_container_update']
# verify that durable data file was NOT created
found_files = self.find_files()
# non durable .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s#2.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
# but no other files
self.assertFalse(found_files['.data'][1:])
found_files.pop('.data')
self.assertFalse(found_files)
# And no container update
self.assertFalse(_container_update.called)
def test_multiphase_put_drains_extra_commit_junk(self):
with self._check_multiphase_put_commit_handling() as context:
conn = context['conn']
# send commit confirmation to start phase2
commit_confirmation_doc = b"\r\n".join((
b"X-Document: put commit",
b"",
b"commit_confirmation",
b"--boundary123",
b"X-Document: we got cleverer",
b"",
b"stuff stuff meaningless stuuuuuuuuuuff",
b"--boundary123",
b"X-Document: we got even cleverer; can you believe it?",
b"Waneshaft: ambifacient lunar",
b"Casing: malleable logarithmic",
b"",
b"potato potato potato potato potato potato potato",
b"--boundary123--",
))
to_send = b"%x\r\n%s\r\n0\r\n\r\n" % \
(len(commit_confirmation_doc), commit_confirmation_doc)
conn.send(to_send)
            # verify success (2xx) to mark end of phase2
resp = conn.getresponse()
self.assertEqual(resp.status, 201)
resp.read()
# make another request to validate the HTTP protocol state
conn.putrequest('GET', '/sda1/0/a/c/o')
conn.putheader('X-Backend-Storage-Policy-Index', '1')
conn.endheaders()
resp = conn.getresponse()
self.assertEqual(resp.status, 200)
resp.read()
resp.close()
# verify successful object data and durable state file write
put_timestamp = context['put_timestamp']
found_files = self.find_files()
# .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s#2#d.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
# but no other files
self.assertFalse(found_files['.data'][1:])
found_files.pop('.data')
self.assertFalse(found_files)
# And container update was called
self.assertTrue(context['mock_container_update'].called)
def test_multiphase_put_drains_extra_commit_junk_disconnect(self):
commit_confirmation_doc = b"\r\n".join((
b"X-Document: put commit",
b"",
b"commit_confirmation",
b"--boundary123",
b"X-Document: we got cleverer",
b"",
b"stuff stuff meaningless stuuuuuuuuuuff",
b"--boundary123",
b"X-Document: we got even cleverer; can you believe it?",
b"Waneshaft: ambifacient lunar",
b"Casing: malleable logarithmic",
b"",
b"potato potato potato potato potato potato potato",
))
# eventlet.wsgi won't return < network_chunk_size from a chunked read
self.app.network_chunk_size = 16
with self._check_multiphase_put_commit_handling() as context:
conn = context['conn']
# send commit confirmation and some other stuff
# but don't send final boundary or last chunk
to_send = b"%x\r\n%s\r\n" % \
(len(commit_confirmation_doc), commit_confirmation_doc)
conn.send(to_send)
# and then bail out
if six.PY2:
conn.sock.fd._sock.close()
else:
conn.sock.fd._real_close()
            # the object server needs to recognize the socket is closed,
            # or at least time out; we'll have to wait
timeout = time() + (self.conf['client_timeout'] + 1)
while True:
try:
# and make sure it demonstrates the client disconnect
log_lines = self.logger.get_lines_for_level('info')
self.assertEqual(len(log_lines), 1)
except AssertionError:
if time() < timeout:
sleep(0.01)
else:
raise
else:
break
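            # pull the response status field out of the access log line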
status = log_lines[0].split()[7]
self.assertEqual(status, '499')
# verify successful object data and durable state file write
put_timestamp = context['put_timestamp']
found_files = self.find_files()
# .data file is there
self.assertEqual(len(found_files['.data']), 1)
obj_datafile = found_files['.data'][0]
self.assertEqual("%s#2#d.data" % put_timestamp.internal,
os.path.basename(obj_datafile))
# but no other files
self.assertFalse(found_files['.data'][1:])
found_files.pop('.data')
self.assertFalse(found_files)
# but no container update
self.assertFalse(context['mock_container_update'].called)
@patch_policies
class TestZeroCopy(unittest.TestCase):
"""Test the object server's zero-copy functionality"""
def _system_can_zero_copy(self):
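        # zero-copy GETs need both splice() support and a usable MD5 socket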
if not splice.available:
return False
try:
utils.get_md5_socket()
except IOError:
return False
return True
def setUp(self):
skip_if_no_xattrs()
if not self._system_can_zero_copy():
raise unittest.SkipTest("zero-copy support is missing")
self.testdir = mkdtemp(suffix="obj_server_zero_copy")
mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
conf = {'devices': self.testdir,
'mount_check': 'false',
'splice': 'yes',
'disk_chunk_size': '4096'}
self.object_controller = object_server.ObjectController(
conf, logger=debug_logger())
self.df_mgr = diskfile.DiskFileManager(
conf, self.object_controller.logger)
listener = listen_zero()
port = listener.getsockname()[1]
self.wsgi_greenlet = spawn(
wsgi.server, listener, self.object_controller, NullLogger())
self.http_conn = httplib.HTTPConnection('127.0.0.1', port)
self.http_conn.connect()
def tearDown(self):
"""Tear down for testing swift.object.server.ObjectController"""
self.wsgi_greenlet.kill()
rmtree(self.testdir)
def test_GET(self):
url_path = '/sda1/2100/a/c/o'
self.http_conn.request('PUT', url_path, 'obj contents',
{'X-Timestamp': '127082564.24709',
'Content-Type': 'application/test'})
response = self.http_conn.getresponse()
self.assertEqual(response.status, 201)
response.read()
self.http_conn.request('GET', url_path)
response = self.http_conn.getresponse()
self.assertEqual(response.status, 200)
contents = response.read()
self.assertEqual(contents, b'obj contents')
def test_GET_big(self):
# Test with a large-ish object to make sure we handle full socket
# buffers correctly.
obj_contents = b'A' * 4 * 1024 * 1024 # 4 MiB
url_path = '/sda1/2100/a/c/o'
self.http_conn.request('PUT', url_path, obj_contents,
{'X-Timestamp': '1402600322.52126',
'Content-Type': 'application/test'})
response = self.http_conn.getresponse()
self.assertEqual(response.status, 201)
response.read()
self.http_conn.request('GET', url_path)
response = self.http_conn.getresponse()
self.assertEqual(response.status, 200)
contents = response.read()
self.assertEqual(contents, obj_contents)
def test_quarantine(self):
obj_hash = hash_path('a', 'c', 'o')
url_path = '/sda1/2100/a/c/o'
ts = '1402601849.47475'
self.http_conn.request('PUT', url_path, b'obj contents',
{'X-Timestamp': ts,
'Content-Type': 'application/test'})
response = self.http_conn.getresponse()
self.assertEqual(response.status, 201)
response.read()
# go goof up the file on disk
fname = os.path.join(self.testdir, 'sda1', 'objects', '2100',
obj_hash[-3:], obj_hash, ts + '.data')
with open(fname, 'rb+') as fh:
fh.write(b'XYZ')
self.http_conn.request('GET', url_path)
response = self.http_conn.getresponse()
self.assertEqual(response.status, 200)
contents = response.read()
self.assertEqual(contents, b'XYZ contents')
self.http_conn.request('GET', url_path)
response = self.http_conn.getresponse()
# it was quarantined by the previous request
self.assertEqual(response.status, 404)
response.read()
def test_quarantine_on_well_formed_zero_byte_file(self):
# Make sure we work around an oddity in Linux's hash sockets
url_path = '/sda1/2100/a/c/o'
ts = '1402700497.71333'
self.http_conn.request(
'PUT', url_path, '',
{'X-Timestamp': ts, 'Content-Length': '0',
'Content-Type': 'application/test'})
response = self.http_conn.getresponse()
self.assertEqual(response.status, 201)
response.read()
self.http_conn.request('GET', url_path)
response = self.http_conn.getresponse()
self.assertEqual(response.status, 200)
contents = response.read()
self.assertEqual(contents, b'')
self.http_conn.request('GET', url_path)
response = self.http_conn.getresponse()
self.assertEqual(response.status, 200) # still there
contents = response.read()
self.assertEqual(contents, b'')
class TestConfigOptionHandling(unittest.TestCase):
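    """Verify that reclaim_age from the [DEFAULT] and [app:object-server]
    sections reaches each policy's diskfile manager as expected."""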
def setUp(self):
self.tmpdir = mkdtemp()
def tearDown(self):
rmtree(self.tmpdir)
def _app_config(self, config):
contents = dedent(config)
conf_file = os.path.join(self.tmpdir, 'object-server.conf')
with open(conf_file, 'w') as f:
f.write(contents)
return init_request_processor(conf_file, 'object-server')[:2]
def test_default(self):
config = """
[DEFAULT]
[pipeline:main]
pipeline = object-server
[app:object-server]
use = egg:swift#object
"""
app, config = self._app_config(config)
self.assertNotIn('reclaim_age', config)
for policy in POLICIES:
self.assertEqual(app._diskfile_router[policy].reclaim_age, 604800)
def test_option_in_app(self):
config = """
[DEFAULT]
[pipeline:main]
pipeline = object-server
[app:object-server]
use = egg:swift#object
reclaim_age = 100
"""
app, config = self._app_config(config)
self.assertEqual(config['reclaim_age'], '100')
for policy in POLICIES:
self.assertEqual(app._diskfile_router[policy].reclaim_age, 100)
def test_option_in_default(self):
config = """
[DEFAULT]
reclaim_age = 200
[pipeline:main]
pipeline = object-server
[app:object-server]
use = egg:swift#object
"""
app, config = self._app_config(config)
self.assertEqual(config['reclaim_age'], '200')
for policy in POLICIES:
self.assertEqual(app._diskfile_router[policy].reclaim_age, 200)
def test_option_in_both(self):
config = """
[DEFAULT]
reclaim_age = 300
[pipeline:main]
pipeline = object-server
[app:object-server]
use = egg:swift#object
reclaim_age = 400
"""
app, config = self._app_config(config)
self.assertEqual(config['reclaim_age'], '300')
for policy in POLICIES:
self.assertEqual(app._diskfile_router[policy].reclaim_age, 300)
# use paste "set" syntax to override global config value
config = """
[DEFAULT]
reclaim_age = 500
[pipeline:main]
pipeline = object-server
[app:object-server]
use = egg:swift#object
set reclaim_age = 600
"""
app, config = self._app_config(config)
self.assertEqual(config['reclaim_age'], '600')
for policy in POLICIES:
self.assertEqual(app._diskfile_router[policy].reclaim_age, 600)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/obj/test_server.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import errno
import io
import json
import unittest
import os
import mock
from gzip import GzipFile
from shutil import rmtree
import six
import six.moves.cPickle as pickle
import time
import tempfile
from contextlib import contextmanager, closing
from collections import defaultdict
from errno import ENOENT, ENOTEMPTY, ENOTDIR
from eventlet.green import subprocess
from eventlet import Timeout, sleep
from test.debug_logger import debug_logger
from test.unit import (patch_policies, make_timestamp_iter, mocked_http_conn,
mock_check_drive, skip_if_no_xattrs)
from swift.common import utils
from swift.common.utils import (hash_path, mkdirs, normalize_timestamp,
storage_directory)
from swift.common import ring
from swift.common.recon import RECON_OBJECT_FILE
from swift.obj import diskfile, replicator as object_replicator
from swift.common.storage_policy import StoragePolicy, POLICIES
from swift.common.exceptions import PartitionLockTimeout
def _ips(*args, **kwargs):
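    # stands in for whataremyips(): the local node is always 127.0.0.0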
return ['127.0.0.0']
def mock_http_connect(status):
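    """Return a replacement for the replicator's http_connect that produces
    FakeConn responses with the given status and never touches the network."""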
class FakeConn(object):
def __init__(self, status, *args, **kwargs):
self.status = status
self.reason = 'Fake'
self.host = args[0]
self.port = args[1]
self.method = args[4]
self.path = args[5]
self.with_exc = False
self.headers = kwargs.get('headers', {})
def getresponse(self):
if self.with_exc:
raise Exception('test')
return self
def getheader(self, header):
return self.headers[header]
def read(self, amt=None):
return pickle.dumps({})
def close(self):
return
return lambda *args, **kwargs: FakeConn(status, *args, **kwargs)
process_errors = []
class MockProcess(object):
ret_code = None
ret_log = None
check_args = None
captured_log = None
class Stream(object):
def read(self):
return next(MockProcess.ret_log)
def __init__(self, *args, **kwargs):
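        # each Popen() call checks that the next set of expected arguments
        # appears in the rsync command line; wait() later returns the next
        # stubbed return code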
targs = next(MockProcess.check_args)
for targ in targs:
            # Allow more than 2 candidate targs
            # (e.g. a case where either node is fine when nodes are shuffled)
if isinstance(targ, tuple):
allowed = False
for target in targ:
if target in args[0]:
allowed = True
if not allowed:
process_errors.append("Invalid: %s not in %s" % (targ,
args))
else:
if targ not in args[0]:
process_errors.append("Invalid: %s not in %s" % (targ,
args))
self.captured_info = {
'rsync_args': args[0],
}
self.stdout = self.Stream()
def wait(self):
        # the _mock_process context manager ensures this class attribute is a
        # mutable list and takes care of resetting it
rv = next(self.ret_code)
if self.captured_log is not None:
self.captured_info['ret_code'] = rv
self.captured_log.append(self.captured_info)
return rv
@contextmanager
def _mock_process(ret):
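    """Patch subprocess.Popen with MockProcess, feeding it the given
    (return_code, stdout, expected_rsync_args) tuples, and yield a list
    that captures every rsync invocation made while the context is active."""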
captured_log = []
MockProcess.captured_log = captured_log
orig_process = subprocess.Popen
MockProcess.ret_code = (i[0] for i in ret)
MockProcess.ret_log = (i[1] if six.PY2 else i[1].encode('utf8')
for i in ret)
MockProcess.check_args = (i[2] for i in ret)
object_replicator.subprocess.Popen = MockProcess
yield captured_log
MockProcess.captured_log = None
object_replicator.subprocess.Popen = orig_process
class MockHungProcess(object):
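    """A subprocess stub that behaves like a hung rsync: it only appears
    reaped (exit code 137) after being terminated and polled enough times."""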
def __init__(self, polls_needed=0, *args, **kwargs):
class MockStdout(object):
def read(self):
pass
self.stdout = MockStdout()
self._state = 'running'
self._calls = []
self._polls = 0
self._polls_needed = polls_needed
def wait(self, timeout=None):
self._calls.append(('wait', self._state))
if self._state == 'running':
# Sleep so we trip the rsync timeout
sleep(1)
raise BaseException('You need to mock out some timeouts')
if not self._polls_needed:
self._state = 'os-reaped'
return 137
if timeout is not None:
raise subprocess.TimeoutExpired('some cmd', timeout)
raise BaseException("You're waiting indefinitely on something "
"we've established is hung")
def poll(self):
self._calls.append(('poll', self._state))
self._polls += 1
if self._polls >= self._polls_needed:
self._state = 'os-reaped'
return 137
else:
return None
def terminate(self):
self._calls.append(('terminate', self._state))
if self._state == 'running':
self._state = 'terminating'
def kill(self):
self._calls.append(('kill', self._state))
self._state = 'killed'
def _create_test_rings(path, devs=None, next_part_power=None):
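    """Write object.ring.gz and object-1.ring.gz test rings (three replicas
    across the given devices) under `path` and force the policy rings to be
    reloaded."""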
testgz = os.path.join(path, 'object.ring.gz')
intended_replica2part2dev_id = [
[0, 1, 2, 3, 4, 5, 6],
[1, 2, 3, 0, 5, 6, 4],
[2, 3, 0, 1, 6, 4, 5],
]
intended_devs = devs or [
{'id': 0, 'device': 'sda', 'zone': 0,
'region': 1, 'ip': '127.0.0.0', 'port': 6200},
{'id': 1, 'device': 'sda', 'zone': 1,
'region': 2, 'ip': '127.0.0.1', 'port': 6200},
{'id': 2, 'device': 'sda', 'zone': 2,
'region': 3, 'ip': '127.0.0.2', 'port': 6200},
{'id': 3, 'device': 'sda', 'zone': 4,
'region': 2, 'ip': '127.0.0.3', 'port': 6200},
{'id': 4, 'device': 'sda', 'zone': 5,
'region': 1, 'ip': '127.0.0.4', 'port': 6200,
'replication_ip': '127.0.1.4'},
{'id': 5, 'device': 'sda', 'zone': 6,
'region': 3, 'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6200},
{'id': 6, 'device': 'sda', 'zone': 7, 'region': 1,
'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'port': 6200},
]
intended_part_shift = 30
with closing(GzipFile(testgz, 'wb')) as f:
pickle.dump(
ring.RingData(intended_replica2part2dev_id,
intended_devs, intended_part_shift, next_part_power),
f)
testgz = os.path.join(path, 'object-1.ring.gz')
with closing(GzipFile(testgz, 'wb')) as f:
pickle.dump(
ring.RingData(intended_replica2part2dev_id,
intended_devs, intended_part_shift, next_part_power),
f)
for policy in POLICIES:
policy.object_ring = None # force reload
return
@patch_policies([StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', True)])
class TestObjectReplicator(unittest.TestCase):
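    """Tests for the object replicator daemon using the rsync sync_method."""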
def setUp(self):
skip_if_no_xattrs()
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b''
# recon cache path
self.recon_cache = tempfile.mkdtemp()
rmtree(self.recon_cache, ignore_errors=1)
os.mkdir(self.recon_cache)
# Setup a test ring (stolen from common/test_ring.py)
self.testdir = tempfile.mkdtemp()
self.devices = os.path.join(self.testdir, 'node')
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
os.mkdir(self.devices)
self.objects, self.objects_1, self.parts, self.parts_1 = \
self._write_disk_data('sda')
_create_test_rings(self.testdir)
self.logger = debug_logger('test-replicator')
self.conf = dict(
bind_ip=_ips()[0], bind_port=6200,
swift_dir=self.testdir, devices=self.devices, mount_check='false',
timeout='300', stats_interval='1', sync_method='rsync',
recon_cache_path=self.recon_cache)
self._create_replicator()
self.ts = make_timestamp_iter()
def tearDown(self):
self.assertFalse(process_errors)
rmtree(self.testdir, ignore_errors=1)
rmtree(self.recon_cache, ignore_errors=1)
def test_ring_ip_and_bind_ip(self):
# make clean base_conf
base_conf = dict(self.conf)
for key in ('bind_ip', 'ring_ip'):
base_conf.pop(key, None)
# default ring_ip is always 0.0.0.0
self.conf = base_conf
self._create_replicator()
self.assertEqual('0.0.0.0', self.replicator.ring_ip)
# bind_ip works fine for legacy configs
self.conf = dict(base_conf)
self.conf['bind_ip'] = '192.168.1.42'
self._create_replicator()
self.assertEqual('192.168.1.42', self.replicator.ring_ip)
# ring_ip works fine by-itself
self.conf = dict(base_conf)
self.conf['ring_ip'] = '192.168.1.43'
self._create_replicator()
self.assertEqual('192.168.1.43', self.replicator.ring_ip)
# if you have both ring_ip wins
self.conf = dict(base_conf)
self.conf['bind_ip'] = '192.168.1.44'
self.conf['ring_ip'] = '192.168.1.45'
self._create_replicator()
self.assertEqual('192.168.1.45', self.replicator.ring_ip)
def test_handoff_replication_setting_warnings(self):
conf_tests = [
# (config, expected_warning)
({}, False),
({'handoff_delete': 'auto'}, False),
({'handoffs_first': 'no'}, False),
({'handoff_delete': '2'}, True),
({'handoffs_first': 'yes'}, True),
({'handoff_delete': '1', 'handoffs_first': 'yes'}, True),
]
log_message = 'Handoff only mode is not intended for normal ' \
'operation, please disable handoffs_first and ' \
'handoff_delete before the next normal rebalance'
for config, expected_warning in conf_tests:
self.logger.clear()
object_replicator.ObjectReplicator(config, logger=self.logger)
warning_log_lines = self.logger.get_lines_for_level('warning')
if expected_warning:
expected_log_lines = [log_message]
else:
expected_log_lines = []
self.assertEqual(expected_log_lines, warning_log_lines,
'expected %s != %s for config %r' % (
expected_log_lines,
warning_log_lines,
config,
))
def _write_disk_data(self, disk_name, with_json=False):
os.mkdir(os.path.join(self.devices, disk_name))
objects = os.path.join(self.devices, disk_name,
diskfile.get_data_dir(POLICIES[0]))
objects_1 = os.path.join(self.devices, disk_name,
diskfile.get_data_dir(POLICIES[1]))
os.mkdir(objects)
os.mkdir(objects_1)
parts = {}
parts_1 = {}
for part in ['0', '1', '2', '3']:
parts[part] = os.path.join(objects, part)
os.mkdir(parts[part])
parts_1[part] = os.path.join(objects_1, part)
os.mkdir(parts_1[part])
if with_json:
for json_file in ['auditor_status_ZBF.json',
'auditor_status_ALL.json']:
for obj_dir in [objects, objects_1]:
with open(os.path.join(obj_dir, json_file), 'w'):
pass
return objects, objects_1, parts, parts_1
def _create_replicator(self):
self.replicator = object_replicator.ObjectReplicator(self.conf)
self.replicator.logger = self.logger
self.replicator._zero_stats()
self.replicator.all_devs_info = set()
self.df_mgr = diskfile.DiskFileManager(self.conf, self.logger)
def test_run_once_no_local_device_in_ring(self):
conf = dict(swift_dir=self.testdir, devices=self.devices,
bind_ip='1.1.1.1', recon_cache_path=self.recon_cache,
mount_check='false', timeout='300', stats_interval='1')
replicator = object_replicator.ObjectReplicator(conf,
logger=self.logger)
replicator.run_once()
expected = [
"Can't find itself in policy with index 0 with ips 1.1.1.1 and"
" with port 6200 in ring file, not replicating",
"Can't find itself in policy with index 1 with ips 1.1.1.1 and"
" with port 6200 in ring file, not replicating",
]
self.assertEqual(expected, self.logger.get_lines_for_level('error'))
def test_run_once(self):
conf = dict(swift_dir=self.testdir, devices=self.devices,
bind_ip=_ips()[0], recon_cache_path=self.recon_cache,
mount_check='false', timeout='300', stats_interval='1')
replicator = object_replicator.ObjectReplicator(conf,
logger=self.logger)
was_connector = object_replicator.http_connect
object_replicator.http_connect = mock_http_connect(200)
cur_part = '0'
df = self.df_mgr.get_diskfile('sda', cur_part, 'a', 'c', 'o',
policy=POLICIES[0])
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
'wb')
f.write(b'1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, cur_part, data_dir)
process_arg_checker = []
ring = replicator.load_object_ring(POLICIES[0])
nodes = [node for node in
ring.get_part_nodes(int(cur_part))
if node['ip'] not in _ips()]
rsync_mods = tuple(['%s::object/sda/objects/%s' %
(node['ip'], cur_part) for node in nodes])
for node in nodes:
process_arg_checker.append(
(0, '', ['rsync', whole_path_from, rsync_mods]))
start = replicator.replication_cycle
self.assertGreaterEqual(start, 0)
self.assertLessEqual(start, 9)
with _mock_process(process_arg_checker):
replicator.run_once()
self.assertEqual((start + 1) % 10, replicator.replication_cycle)
self.assertFalse(process_errors)
self.assertFalse(self.logger.get_lines_for_level('error'))
# Returns 0 at first, and 60 on all following .next() calls
def _infinite_gen():
yield 0
while True:
yield 60
for cycle in range(1, 10):
with _mock_process(process_arg_checker):
with mock.patch('time.time', side_effect=_infinite_gen()):
replicator.run_once()
self.assertEqual((start + 1 + cycle) % 10,
replicator.replication_cycle)
recon_fname = os.path.join(self.recon_cache, RECON_OBJECT_FILE)
with open(recon_fname) as cachefile:
recon = json.loads(cachefile.read())
self.assertEqual(1, recon.get('replication_time'))
self.assertIn('replication_stats', recon)
self.assertIn('replication_last', recon)
expected = 'Object replication complete (once). (1.00 minutes)'
self.assertIn(expected, self.logger.get_lines_for_level('info'))
self.assertFalse(self.logger.get_lines_for_level('error'))
object_replicator.http_connect = was_connector
# policy 1
def test_run_once_1(self):
conf = dict(swift_dir=self.testdir, devices=self.devices,
recon_cache_path=self.recon_cache,
mount_check='false', timeout='300', stats_interval='1')
replicator = object_replicator.ObjectReplicator(conf,
logger=self.logger)
was_connector = object_replicator.http_connect
object_replicator.http_connect = mock_http_connect(200)
cur_part = '0'
df = self.df_mgr.get_diskfile('sda', cur_part, 'a', 'c', 'o',
policy=POLICIES[1])
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
'wb')
f.write(b'1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects_1, cur_part, data_dir)
process_arg_checker = []
ring = replicator.load_object_ring(POLICIES[1])
nodes = [node for node in
ring.get_part_nodes(int(cur_part))
if node['ip'] not in _ips()]
rsync_mods = tuple(['%s::object/sda/objects-1/%s' %
(node['ip'], cur_part) for node in nodes])
for node in nodes:
process_arg_checker.append(
(0, '', ['rsync', whole_path_from, rsync_mods]))
with _mock_process(process_arg_checker):
with mock.patch('swift.obj.replicator.whataremyips',
side_effect=_ips):
replicator.run_once()
self.assertFalse(process_errors)
self.assertFalse(self.logger.get_lines_for_level('error'))
object_replicator.http_connect = was_connector
def test_check_ring(self):
for pol in POLICIES:
obj_ring = self.replicator.load_object_ring(pol)
self.assertTrue(self.replicator.check_ring(obj_ring))
orig_check = self.replicator.next_check
self.replicator.next_check = orig_check - 30
self.assertTrue(self.replicator.check_ring(obj_ring))
self.replicator.next_check = orig_check
orig_ring_time = obj_ring._mtime
obj_ring._mtime = orig_ring_time - 30
self.assertTrue(self.replicator.check_ring(obj_ring))
self.replicator.next_check = orig_check - 30
self.assertFalse(self.replicator.check_ring(obj_ring))
def test_collect_jobs_mkdirs_error(self):
non_local = {}
def blowup_mkdirs(path):
non_local['path'] = path
raise OSError('Ow!')
with mock.patch.object(object_replicator, 'mkdirs', blowup_mkdirs):
rmtree(self.objects, ignore_errors=1)
object_replicator.mkdirs = blowup_mkdirs
self.replicator.collect_jobs()
self.assertEqual(self.logger.get_lines_for_level('error'), [
'ERROR creating %s: ' % non_local['path']])
log_args, log_kwargs = self.logger.log_dict['error'][0]
self.assertEqual(str(log_kwargs['exc_info'][1]), 'Ow!')
def test_collect_jobs(self):
jobs = self.replicator.collect_jobs()
jobs_to_delete = [j for j in jobs if j['delete']]
jobs_by_pol_part = {}
for job in jobs:
jobs_by_pol_part[str(int(job['policy'])) + job['partition']] = job
self.assertEqual(len(jobs_to_delete), 2)
self.assertEqual('1', jobs_to_delete[0]['partition'])
self.assertEqual(
[node['id'] for node in jobs_by_pol_part['00']['nodes']], [1, 2])
self.assertEqual(
[node['id'] for node in jobs_by_pol_part['01']['nodes']],
[1, 2, 3])
self.assertEqual(
[node['id'] for node in jobs_by_pol_part['02']['nodes']], [2, 3])
self.assertEqual(
[node['id'] for node in jobs_by_pol_part['03']['nodes']], [3, 1])
self.assertEqual(
[node['id'] for node in jobs_by_pol_part['10']['nodes']], [1, 2])
self.assertEqual(
[node['id'] for node in jobs_by_pol_part['11']['nodes']],
[1, 2, 3])
self.assertEqual(
[node['id'] for node in jobs_by_pol_part['12']['nodes']], [2, 3])
self.assertEqual(
[node['id'] for node in jobs_by_pol_part['13']['nodes']], [3, 1])
for part in ['00', '01', '02', '03']:
for node in jobs_by_pol_part[part]['nodes']:
self.assertEqual(node['device'], 'sda')
self.assertEqual(jobs_by_pol_part[part]['path'],
os.path.join(self.objects, part[1:]))
for part in ['10', '11', '12', '13']:
for node in jobs_by_pol_part[part]['nodes']:
self.assertEqual(node['device'], 'sda')
self.assertEqual(jobs_by_pol_part[part]['path'],
os.path.join(self.objects_1, part[1:]))
def test_collect_jobs_unmounted(self):
with mock_check_drive() as mocks:
jobs = self.replicator.collect_jobs()
self.assertEqual(jobs, [])
self.assertEqual(mocks['ismount'].mock_calls, [])
self.assertEqual(len(mocks['isdir'].mock_calls), 2)
self.replicator.mount_check = True
with mock_check_drive() as mocks:
jobs = self.replicator.collect_jobs()
self.assertEqual(jobs, [])
self.assertEqual(mocks['isdir'].mock_calls, [])
self.assertEqual(len(mocks['ismount'].mock_calls), 2)
def test_collect_jobs_failure_report_with_auditor_stats_json(self):
devs = [
{'id': 0, 'device': 'sda', 'zone': 0,
'region': 1, 'ip': '1.1.1.1', 'port': 1111,
'replication_ip': '127.0.0.0', 'replication_port': 6200},
{'id': 1, 'device': 'sdb', 'zone': 1,
'region': 1, 'ip': '1.1.1.1', 'port': 1111,
'replication_ip': '127.0.0.0', 'replication_port': 6200},
{'id': 2, 'device': 'sdc', 'zone': 2,
'region': 1, 'ip': '1.1.1.1', 'port': 1111,
'replication_ip': '127.0.0.1', 'replication_port': 6200},
{'id': 3, 'device': 'sdd', 'zone': 3,
'region': 1, 'ip': '1.1.1.1', 'port': 1111,
'replication_ip': '127.0.0.1', 'replication_port': 6200},
]
objects_sdb, objects_1_sdb, _, _ = \
self._write_disk_data('sdb', with_json=True)
objects_sdc, objects_1_sdc, _, _ = \
self._write_disk_data('sdc', with_json=True)
objects_sdd, objects_1_sdd, _, _ = \
self._write_disk_data('sdd', with_json=True)
_create_test_rings(self.testdir, devs)
self.replicator.collect_jobs(override_partitions=[1])
self.assertEqual(self.replicator.total_stats.failure, 0)
def test_collect_jobs_with_override_parts_and_unexpected_part_dir(self):
self.replicator.collect_jobs(override_partitions=[0, 2])
self.assertEqual(self.replicator.total_stats.failure, 0)
os.mkdir(os.path.join(self.objects_1, 'foo'))
jobs = self.replicator.collect_jobs(override_partitions=[0, 2])
found_jobs = set()
for j in jobs:
found_jobs.add((int(j['policy']), int(j['partition'])))
self.assertEqual(found_jobs, {
(0, 0),
(0, 2),
(1, 0),
(1, 2),
})
num_disks = len(POLICIES[1].object_ring.devs)
# N.B. it's not clear why the UUT increments failure per device
self.assertEqual(self.replicator.total_stats.failure, num_disks)
@mock.patch('swift.obj.replicator.random.shuffle', side_effect=lambda l: l)
def test_collect_jobs_multi_disk(self, mock_shuffle):
devs = [
# Two disks on same IP/port
{'id': 0, 'device': 'sda', 'zone': 0,
'region': 1, 'ip': '1.1.1.1', 'port': 1111,
'replication_ip': '127.0.0.0', 'replication_port': 6200},
{'id': 1, 'device': 'sdb', 'zone': 1,
'region': 1, 'ip': '1.1.1.1', 'port': 1111,
'replication_ip': '127.0.0.0', 'replication_port': 6200},
# Two disks on same server, different ports
{'id': 2, 'device': 'sdc', 'zone': 2,
'region': 2, 'ip': '1.1.1.2', 'port': 1112,
'replication_ip': '127.0.0.1', 'replication_port': 6200},
{'id': 3, 'device': 'sdd', 'zone': 4,
'region': 2, 'ip': '1.1.1.2', 'port': 1112,
'replication_ip': '127.0.0.1', 'replication_port': 6201},
]
objects_sdb, objects_1_sdb, _, _ = self._write_disk_data('sdb')
objects_sdc, objects_1_sdc, _, _ = self._write_disk_data('sdc')
objects_sdd, objects_1_sdd, _, _ = self._write_disk_data('sdd')
_create_test_rings(self.testdir, devs)
jobs = self.replicator.collect_jobs()
self.assertEqual([mock.call(jobs)], mock_shuffle.mock_calls)
jobs_to_delete = [j for j in jobs if j['delete']]
self.assertEqual(len(jobs_to_delete), 4)
self.assertEqual([
'1', '2', # policy 0; 1 not on sda, 2 not on sdb
'1', '2', # policy 1; 1 not on sda, 2 not on sdb
], [j['partition'] for j in jobs_to_delete])
jobs_by_pol_part_dev = {}
for job in jobs:
            # There should be no jobs with a device other than sda & sdb
self.assertTrue(job['device'] in ('sda', 'sdb'))
jobs_by_pol_part_dev[
str(int(job['policy'])) + job['partition'] + job['device']
] = job
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['00sda']['nodes']],
[1, 2])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['00sdb']['nodes']],
[0, 2])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['01sda']['nodes']],
[1, 2, 3])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['01sdb']['nodes']],
[2, 3])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['02sda']['nodes']],
[2, 3])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['02sdb']['nodes']],
[2, 3, 0])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['03sda']['nodes']],
[3, 1])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['03sdb']['nodes']],
[3, 0])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['10sda']['nodes']],
[1, 2])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['10sdb']['nodes']],
[0, 2])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['11sda']['nodes']],
[1, 2, 3])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['11sdb']['nodes']],
[2, 3])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['12sda']['nodes']],
[2, 3])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['12sdb']['nodes']],
[2, 3, 0])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['13sda']['nodes']],
[3, 1])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['13sdb']['nodes']],
[3, 0])
for part in ['00', '01', '02', '03']:
self.assertEqual(jobs_by_pol_part_dev[part + 'sda']['path'],
os.path.join(self.objects, part[1:]))
self.assertEqual(jobs_by_pol_part_dev[part + 'sdb']['path'],
os.path.join(objects_sdb, part[1:]))
for part in ['10', '11', '12', '13']:
self.assertEqual(jobs_by_pol_part_dev[part + 'sda']['path'],
os.path.join(self.objects_1, part[1:]))
self.assertEqual(jobs_by_pol_part_dev[part + 'sdb']['path'],
os.path.join(objects_1_sdb, part[1:]))
@mock.patch('swift.obj.replicator.random.shuffle', side_effect=lambda l: l)
def test_collect_jobs_multi_disk_diff_ports_normal(self, mock_shuffle):
# Normally (servers_per_port=0), replication_ip AND replication_port
# are used to determine local ring device entries. Here we show that
# with bind_ip='127.0.0.1', bind_port=6200, only "sdc" is local.
devs = [
# Two disks on same IP/port
{'id': 0, 'device': 'sda', 'zone': 0,
'region': 1, 'ip': '1.1.1.1', 'port': 1111,
'replication_ip': '127.0.0.0', 'replication_port': 6200},
{'id': 1, 'device': 'sdb', 'zone': 1,
'region': 1, 'ip': '1.1.1.1', 'port': 1111,
'replication_ip': '127.0.0.0', 'replication_port': 6200},
# Two disks on same server, different ports
{'id': 2, 'device': 'sdc', 'zone': 2,
'region': 2, 'ip': '1.1.1.2', 'port': 1112,
'replication_ip': '127.0.0.1', 'replication_port': 6200},
{'id': 3, 'device': 'sdd', 'zone': 4,
'region': 2, 'ip': '1.1.1.2', 'port': 1112,
'replication_ip': '127.0.0.1', 'replication_port': 6201},
]
objects_sdb, objects_1_sdb, _, _ = self._write_disk_data('sdb')
objects_sdc, objects_1_sdc, _, _ = self._write_disk_data('sdc')
objects_sdd, objects_1_sdd, _, _ = self._write_disk_data('sdd')
_create_test_rings(self.testdir, devs)
self.conf['bind_ip'] = '127.0.0.1'
self._create_replicator()
jobs = self.replicator.collect_jobs()
self.assertEqual([mock.call(jobs)], mock_shuffle.mock_calls)
jobs_to_delete = [j for j in jobs if j['delete']]
self.assertEqual(len(jobs_to_delete), 2)
self.assertEqual([
'3', # policy 0; 3 not on sdc
'3', # policy 1; 3 not on sdc
], [j['partition'] for j in jobs_to_delete])
jobs_by_pol_part_dev = {}
for job in jobs:
            # There should be no jobs with a device other than sdc
self.assertEqual(job['device'], 'sdc')
jobs_by_pol_part_dev[
str(int(job['policy'])) + job['partition'] + job['device']
] = job
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['00sdc']['nodes']],
[0, 1])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['01sdc']['nodes']],
[1, 3])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['02sdc']['nodes']],
[3, 0])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['03sdc']['nodes']],
[3, 0, 1])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['10sdc']['nodes']],
[0, 1])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['11sdc']['nodes']],
[1, 3])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['12sdc']['nodes']],
[3, 0])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['13sdc']['nodes']],
[3, 0, 1])
for part in ['00', '01', '02', '03']:
self.assertEqual(jobs_by_pol_part_dev[part + 'sdc']['path'],
os.path.join(objects_sdc, part[1:]))
for part in ['10', '11', '12', '13']:
self.assertEqual(jobs_by_pol_part_dev[part + 'sdc']['path'],
os.path.join(objects_1_sdc, part[1:]))
@mock.patch('swift.obj.replicator.random.shuffle', side_effect=lambda l: l)
def test_collect_jobs_multi_disk_servers_per_port(self, mock_shuffle):
# Normally (servers_per_port=0), replication_ip AND replication_port
# are used to determine local ring device entries. Here we show that
# with servers_per_port > 0 and bind_ip='127.0.0.1', bind_port=6200,
# then both "sdc" and "sdd" are local.
devs = [
# Two disks on same IP/port
{'id': 0, 'device': 'sda', 'zone': 0,
'region': 1, 'ip': '1.1.1.1', 'port': 1111,
'replication_ip': '127.0.0.0', 'replication_port': 6200},
{'id': 1, 'device': 'sdb', 'zone': 1,
'region': 1, 'ip': '1.1.1.1', 'port': 1111,
'replication_ip': '127.0.0.0', 'replication_port': 6200},
# Two disks on same server, different ports
{'id': 2, 'device': 'sdc', 'zone': 2,
'region': 2, 'ip': '1.1.1.2', 'port': 1112,
'replication_ip': '127.0.0.1', 'replication_port': 6200},
{'id': 3, 'device': 'sdd', 'zone': 4,
'region': 2, 'ip': '1.1.1.2', 'port': 1112,
'replication_ip': '127.0.0.1', 'replication_port': 6201},
]
objects_sdb, objects_1_sdb, _, _ = self._write_disk_data('sdb')
objects_sdc, objects_1_sdc, _, _ = self._write_disk_data('sdc')
objects_sdd, objects_1_sdd, _, _ = self._write_disk_data('sdd')
_create_test_rings(self.testdir, devs)
self.conf['bind_ip'] = '127.0.0.1'
self.conf['servers_per_port'] = 1 # diff port ok
self._create_replicator()
jobs = self.replicator.collect_jobs()
self.assertEqual([mock.call(jobs)], mock_shuffle.mock_calls)
jobs_to_delete = [j for j in jobs if j['delete']]
self.assertEqual(len(jobs_to_delete), 4)
self.assertEqual([
'3', '0', # policy 0; 3 not on sdc, 0 not on sdd
'3', '0', # policy 1; 3 not on sdc, 0 not on sdd
], [j['partition'] for j in jobs_to_delete])
jobs_by_pol_part_dev = {}
for job in jobs:
            # There should be no jobs with a device other than sdc & sdd
self.assertTrue(job['device'] in ('sdc', 'sdd'))
jobs_by_pol_part_dev[
str(int(job['policy'])) + job['partition'] + job['device']
] = job
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['00sdc']['nodes']],
[0, 1])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['00sdd']['nodes']],
[0, 1, 2])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['01sdc']['nodes']],
[1, 3])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['01sdd']['nodes']],
[1, 2])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['02sdc']['nodes']],
[3, 0])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['02sdd']['nodes']],
[2, 0])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['03sdc']['nodes']],
[3, 0, 1])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['03sdd']['nodes']],
[0, 1])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['10sdc']['nodes']],
[0, 1])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['10sdd']['nodes']],
[0, 1, 2])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['11sdc']['nodes']],
[1, 3])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['11sdd']['nodes']],
[1, 2])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['12sdc']['nodes']],
[3, 0])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['12sdd']['nodes']],
[2, 0])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['13sdc']['nodes']],
[3, 0, 1])
self.assertEqual([node['id']
for node in jobs_by_pol_part_dev['13sdd']['nodes']],
[0, 1])
for part in ['00', '01', '02', '03']:
self.assertEqual(jobs_by_pol_part_dev[part + 'sdc']['path'],
os.path.join(objects_sdc, part[1:]))
self.assertEqual(jobs_by_pol_part_dev[part + 'sdd']['path'],
os.path.join(objects_sdd, part[1:]))
for part in ['10', '11', '12', '13']:
self.assertEqual(jobs_by_pol_part_dev[part + 'sdc']['path'],
os.path.join(objects_1_sdc, part[1:]))
self.assertEqual(jobs_by_pol_part_dev[part + 'sdd']['path'],
os.path.join(objects_1_sdd, part[1:]))
def test_collect_jobs_handoffs_first(self):
self.replicator.handoffs_first = True
jobs = self.replicator.collect_jobs()
self.assertTrue(jobs[0]['delete'])
self.assertEqual('1', jobs[0]['partition'])
def test_handoffs_first_mode_will_process_all_jobs_after_handoffs(self):
# make an object in the handoff & primary partition
expected_suffix_paths = []
for policy in POLICIES:
# primary
ts = next(self.ts)
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o', policy)
with df.create() as w:
w.write(b'asdf')
w.put({'X-Timestamp': ts.internal})
w.commit(ts)
expected_suffix_paths.append(os.path.dirname(df._datadir))
# handoff
ts = next(self.ts)
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o', policy)
with df.create() as w:
w.write(b'asdf')
w.put({'X-Timestamp': ts.internal})
w.commit(ts)
expected_suffix_paths.append(os.path.dirname(df._datadir))
# rsync will be called for all parts we created objects in
process_arg_checker = [
# (return_code, stdout, <each in capture rsync args>)
(0, '', []),
(0, '', []),
(0, '', []), # handoff job "first" policy
(0, '', []),
(0, '', []),
(0, '', []), # handoff job "second" policy
(0, '', []),
(0, '', []), # update job "first" policy
(0, '', []),
(0, '', []), # update job "second" policy
]
        # each handoff partition node gets one replicate request after
        # rsync (2 * 3), each primary partition with objects gets two
        # replicate requests (pre-flight and post sync) to each of its
        # partners (2 * 2 * 2), and the 2 remaining empty parts (2 & 3) get a
        # pre-flight replicate request per node for each storage policy
        # (2 * 2 * 2) - so 6 + 8 + 8 == 22
replicate_responses = [200] * 22
stub_body = pickle.dumps({})
with _mock_process(process_arg_checker) as rsync_log, \
mock.patch('swift.obj.replicator.whataremyips',
side_effect=_ips), \
mocked_http_conn(*replicate_responses,
body=stub_body) as conn_log:
self.replicator.handoffs_first = True
self.replicator.replicate()
# all jobs processed!
self.assertEqual(self.replicator.job_count,
self.replicator.total_stats.attempted)
self.assertFalse(self.replicator.handoffs_remaining)
# sanity, all the handoffs suffixes we filled in were rsync'd
found_rsync_suffix_paths = set()
for subprocess_info in rsync_log:
local_path, remote_path = subprocess_info['rsync_args'][-2:]
found_rsync_suffix_paths.add(local_path)
self.assertEqual(set(expected_suffix_paths), found_rsync_suffix_paths)
# sanity, all nodes got replicated
found_replicate_calls = defaultdict(int)
for req in conn_log.requests:
self.assertEqual(req['method'], 'REPLICATE')
found_replicate_key = (
int(req['headers']['X-Backend-Storage-Policy-Index']),
req['path'])
found_replicate_calls[found_replicate_key] += 1
expected_replicate_calls = {
(0, '/sda/1/a83'): 3,
(1, '/sda/1/a83'): 3,
(0, '/sda/0'): 2,
(0, '/sda/0/a83'): 2,
(1, '/sda/0'): 2,
(1, '/sda/0/a83'): 2,
(0, '/sda/2'): 2,
(1, '/sda/2'): 2,
(0, '/sda/3'): 2,
(1, '/sda/3'): 2,
}
self.assertEqual(dict(found_replicate_calls),
expected_replicate_calls)
def test_handoffs_first_mode_will_abort_if_handoffs_remaining(self):
# make an object in the handoff partition
handoff_suffix_paths = []
for policy in POLICIES:
ts = next(self.ts)
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o', policy)
with df.create() as w:
w.write(b'asdf')
w.put({'X-Timestamp': ts.internal})
w.commit(ts)
handoff_suffix_paths.append(os.path.dirname(df._datadir))
process_arg_checker = [
# (return_code, stdout, <each in capture rsync args>)
(0, '', []),
(1, '', []),
(0, '', []),
(0, '', []),
(0, '', []),
(0, '', []),
]
stub_body = pickle.dumps({})
with _mock_process(process_arg_checker) as rsync_log, \
mock.patch('swift.obj.replicator.whataremyips',
side_effect=_ips), \
mocked_http_conn(*[200] * 5, body=stub_body) as conn_log:
self.replicator.handoffs_first = True
self.replicator.replicate()
# stopped after handoffs!
self.assertEqual(1, self.replicator.handoffs_remaining)
self.assertEqual(8, self.replicator.job_count)
self.assertEqual(self.replicator.total_stats.failure, 1)
# in addition to the two update_deleted jobs as many as "concurrency"
# jobs may have been spawned into the pool before the failed
# update_deleted job incremented handoffs_remaining and caused the
# handoffs_first check to abort the current pass
self.assertLessEqual(self.replicator.total_stats.attempted,
2 + self.replicator.concurrency)
# sanity, all the handoffs suffixes we filled in were rsync'd
found_rsync_suffix_paths = set()
expected_replicate_requests = set()
for subprocess_info in rsync_log:
local_path, remote_path = subprocess_info['rsync_args'][-2:]
found_rsync_suffix_paths.add(local_path)
if subprocess_info['ret_code'] == 0:
node_ip = remote_path.split(':', 1)[0]
expected_replicate_requests.add(node_ip)
self.assertEqual(set(handoff_suffix_paths), found_rsync_suffix_paths)
# sanity, all successful rsync nodes got REPLICATE requests
found_replicate_requests = set()
self.assertEqual(5, len(conn_log.requests))
for req in conn_log.requests:
self.assertEqual(req['method'], 'REPLICATE')
found_replicate_requests.add(req['ip'])
self.assertEqual(expected_replicate_requests,
found_replicate_requests)
# and at least one partition got removed!
remaining_policies = []
for path in handoff_suffix_paths:
if os.path.exists(path):
policy = diskfile.extract_policy(path)
remaining_policies.append(policy)
self.assertEqual(len(remaining_policies), 1)
remaining_policy = remaining_policies[0]
# try again but with handoff_delete allowing for a single failure
with _mock_process(process_arg_checker) as rsync_log, \
mock.patch('swift.obj.replicator.whataremyips',
side_effect=_ips), \
mocked_http_conn(*[200] * 14, body=stub_body) as conn_log:
self.replicator.handoff_delete = 2
self.replicator._zero_stats()
self.replicator.replicate()
# all jobs processed!
self.assertEqual(self.replicator.job_count,
self.replicator.total_stats.attempted)
self.assertFalse(self.replicator.handoffs_remaining)
# sanity, all parts got replicated
found_replicate_calls = defaultdict(int)
for req in conn_log.requests:
self.assertEqual(req['method'], 'REPLICATE')
found_replicate_key = (
int(req['headers']['X-Backend-Storage-Policy-Index']),
req['path'])
found_replicate_calls[found_replicate_key] += 1
expected_replicate_calls = {
(int(remaining_policy), '/sda/1/a83'): 2,
(0, '/sda/0'): 2,
(1, '/sda/0'): 2,
(0, '/sda/2'): 2,
(1, '/sda/2'): 2,
(0, '/sda/3'): 2,
(1, '/sda/3'): 2,
}
self.assertEqual(dict(found_replicate_calls),
expected_replicate_calls)
# and now all handoff partitions have been rebalanced away!
removed_paths = set()
for path in handoff_suffix_paths:
if not os.path.exists(path):
removed_paths.add(path)
self.assertEqual(removed_paths, set(handoff_suffix_paths))
def test_replicator_skips_bogus_partition_dirs(self):
# A directory in the wrong place shouldn't crash the replicator
rmtree(self.objects)
rmtree(self.objects_1)
os.mkdir(self.objects)
os.mkdir(self.objects_1)
os.mkdir(os.path.join(self.objects, "burrito"))
jobs = self.replicator.collect_jobs()
self.assertEqual(len(jobs), 0)
def test_replicator_skips_rsync_temp_files(self):
# the empty pre-setup dirs aren't that useful to us
device_path = os.path.join(self.devices, 'sda')
rmtree(device_path, ignore_errors=1)
os.mkdir(device_path)
# create a real data file to trigger rsync
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o',
policy=POLICIES.legacy)
ts = next(self.ts)
with df.create() as w:
w.write(b'asdf')
w.put({'X-Timestamp': ts.internal})
w.commit(ts)
# pre-flight and post sync request for both other primaries
expected_replicate_requests = 4
process_arg_checker = [
# (return_code, stdout, <each in capture rsync args>)
(0, '', []),
(0, '', []),
]
stub_body = pickle.dumps({})
with _mock_process(process_arg_checker) as rsync_log, \
mock.patch('swift.obj.replicator.whataremyips',
side_effect=_ips), \
mocked_http_conn(*[200] * expected_replicate_requests,
body=stub_body) as conn_log:
self.replicator.replicate()
self.assertEqual(['REPLICATE'] * expected_replicate_requests,
[r['method'] for r in conn_log.requests])
# expect one rsync to each other primary node
self.assertEqual(2, len(rsync_log))
expected = '--exclude=.*.[0-9a-zA-Z][0-9a-zA-Z][0-9a-zA-Z]' \
'[0-9a-zA-Z][0-9a-zA-Z][0-9a-zA-Z]'
for subprocess_info in rsync_log:
rsync_args = subprocess_info['rsync_args']
for arg in rsync_args:
if arg.startswith('--exclude'):
self.assertEqual(arg, expected)
break
else:
self.fail('Did not find --exclude argument in %r' %
rsync_args)
def test_replicator_removes_zbf(self):
# After running xfs_repair, a partition directory could become a
# zero-byte file. If this happens, the replicator should clean it
# up, log something, and move on to the next partition.
# Surprise! Partition dir 1 is actually a zero-byte file.
pol_0_part_1_path = os.path.join(self.objects, '1')
rmtree(pol_0_part_1_path)
with open(pol_0_part_1_path, 'w'):
pass
self.assertTrue(os.path.isfile(pol_0_part_1_path)) # sanity check
# Policy 1's partition dir 1 is also a zero-byte file.
pol_1_part_1_path = os.path.join(self.objects_1, '1')
rmtree(pol_1_part_1_path)
with open(pol_1_part_1_path, 'w'):
pass
self.assertTrue(os.path.isfile(pol_1_part_1_path)) # sanity check
# Don't delete things in collect_jobs(); all the stat() calls would
# make replicator startup really slow.
self.replicator.collect_jobs()
self.assertTrue(os.path.exists(pol_0_part_1_path))
self.assertTrue(os.path.exists(pol_1_part_1_path))
# After a replication pass, the files should be gone
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
self.replicator.run_once()
self.assertFalse(os.path.exists(pol_0_part_1_path))
self.assertFalse(os.path.exists(pol_1_part_1_path))
self.assertEqual(
sorted(self.logger.get_lines_for_level('warning')), [
('Removing partition directory which was a file: %s'
% pol_1_part_1_path),
('Removing partition directory which was a file: %s'
% pol_0_part_1_path),
])
def test_delete_partition(self):
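        # rsync the local handoff partition to the other primary nodes and
        # verify the local partition directory is removed afterwards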
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
policy=POLICIES.legacy)
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
'wb')
f.write(b'1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '1', data_dir)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
ring = self.replicator.load_object_ring(POLICIES[0])
nodes = [node for node in
ring.get_part_nodes(1)
if node['ip'] not in _ips()]
process_arg_checker = []
for node in nodes:
rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], 1)
process_arg_checker.append(
(0, '', ['rsync', whole_path_from, rsync_mod]))
with _mock_process(process_arg_checker):
self.replicator.replicate()
self.assertFalse(os.access(part_path, os.F_OK))
def test_delete_partition_default_sync_method(self):
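        # as test_delete_partition, but rely on the default sync_method
        # (rsync) by removing the setting from the conf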
self.replicator.conf.pop('sync_method')
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
policy=POLICIES.legacy)
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
'wb')
f.write(b'1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '1', data_dir)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
ring = self.replicator.load_object_ring(POLICIES[0])
nodes = [node for node in
ring.get_part_nodes(1)
if node['ip'] not in _ips()]
process_arg_checker = []
for node in nodes:
rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], 1)
process_arg_checker.append(
(0, '', ['rsync', whole_path_from, rsync_mod]))
with _mock_process(process_arg_checker):
self.replicator.replicate()
self.assertFalse(os.access(part_path, os.F_OK))
def test_delete_partition_ssync_single_region(self):
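        # build a ring where every device is in a single region; after a
        # successful ssync to the other nodes the handoff object, suffix dir
        # and partition dir should all be removed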
devs = [
{'id': 0, 'device': 'sda', 'zone': 0,
'region': 1, 'ip': '127.0.0.0', 'port': 6200},
{'id': 1, 'device': 'sda', 'zone': 1,
'region': 1, 'ip': '127.0.0.1', 'port': 6200},
{'id': 2, 'device': 'sda', 'zone': 2,
'region': 1, 'ip': '127.0.0.2', 'port': 6200},
{'id': 3, 'device': 'sda', 'zone': 4,
'region': 1, 'ip': '127.0.0.3', 'port': 6200},
{'id': 4, 'device': 'sda', 'zone': 5,
'region': 1, 'ip': '127.0.0.4', 'port': 6200},
{'id': 5, 'device': 'sda', 'zone': 6,
'region': 1, 'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6200},
{'id': 6, 'device': 'sda', 'zone': 7, 'region': 1,
'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'port': 6200},
]
_create_test_rings(self.testdir, devs=devs)
self.conf['sync_method'] = 'ssync'
self.replicator = object_replicator.ObjectReplicator(self.conf)
self.replicator.logger = debug_logger()
self.replicator._zero_stats()
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
policy=POLICIES.legacy)
mkdirs(df._datadir)
ts = normalize_timestamp(time.time())
f = open(os.path.join(df._datadir, ts + '.data'),
'wb')
f.write(b'1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
whole_path_from = storage_directory(self.objects, 1, ohash)
suffix_dir_path = os.path.dirname(whole_path_from)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
def _fake_ssync(node, job, suffixes, **kwargs):
return True, {ohash: ts}
self.replicator.sync_method = _fake_ssync
self.replicator.replicate()
self.assertFalse(os.access(whole_path_from, os.F_OK))
self.assertFalse(os.access(suffix_dir_path, os.F_OK))
self.assertFalse(os.access(part_path, os.F_OK))
def test_delete_partition_1(self):
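        # as test_delete_partition, but for storage policy 1 (objects-1)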
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
policy=POLICIES[1])
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
'wb')
f.write(b'1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects_1, '1', data_dir)
part_path = os.path.join(self.objects_1, '1')
self.assertTrue(os.access(part_path, os.F_OK))
ring = self.replicator.load_object_ring(POLICIES[1])
nodes = [node for node in
ring.get_part_nodes(1)
if node['ip'] not in _ips()]
process_arg_checker = []
for node in nodes:
rsync_mod = '%s::object/sda/objects-1/%s' % (node['ip'], 1)
process_arg_checker.append(
(0, '', ['rsync', whole_path_from, rsync_mod]))
with _mock_process(process_arg_checker):
self.replicator.replicate()
self.assertFalse(os.access(part_path, os.F_OK))
def test_delete_partition_with_failures(self):
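        # with handoff_delete left at its default, a single rsync failure
        # should prevent the handoff partition from being removed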
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
policy=POLICIES.legacy)
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
'wb')
f.write(b'1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '1', data_dir)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
ring = self.replicator.load_object_ring(POLICIES[0])
nodes = [node for node in
ring.get_part_nodes(1)
if node['ip'] not in _ips()]
process_arg_checker = []
for i, node in enumerate(nodes):
rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], 1)
if i == 0:
# force one of the rsync calls to fail
ret_code = 1
else:
ret_code = 0
process_arg_checker.append(
(ret_code, '', ['rsync', whole_path_from, rsync_mod]))
with _mock_process(process_arg_checker):
self.replicator.replicate()
# The path should still exist
self.assertTrue(os.access(part_path, os.F_OK))
def test_delete_partition_with_handoff_delete(self):
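        # with handoff_delete = 2, a single rsync failure still leaves enough
        # successful syncs for the handoff partition to be removed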
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
self.replicator.handoff_delete = 2
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
policy=POLICIES.legacy)
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
'wb')
f.write(b'1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '1', data_dir)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
ring = self.replicator.load_object_ring(POLICIES[0])
nodes = [node for node in
ring.get_part_nodes(1)
if node['ip'] not in _ips()]
process_arg_checker = []
for i, node in enumerate(nodes):
rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], 1)
if i == 0:
# force one of the rsync calls to fail
ret_code = 1
else:
ret_code = 0
process_arg_checker.append(
(ret_code, '', ['rsync', whole_path_from, rsync_mod]))
with _mock_process(process_arg_checker):
self.replicator.replicate()
self.assertFalse(os.access(part_path, os.F_OK))
def test_delete_partition_with_handoff_delete_failures(self):
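        # with handoff_delete = 2, two rsync failures mean too few successful
        # syncs, so the handoff partition must be kept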
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
self.replicator.handoff_delete = 2
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
policy=POLICIES.legacy)
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
'wb')
f.write(b'1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '1', data_dir)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
ring = self.replicator.load_object_ring(POLICIES[0])
nodes = [node for node in
ring.get_part_nodes(1)
if node['ip'] not in _ips()]
process_arg_checker = []
for i, node in enumerate(nodes):
rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], 1)
if i in (0, 1):
# force two of the rsync calls to fail
ret_code = 1
else:
ret_code = 0
process_arg_checker.append(
(ret_code, '', ['rsync', whole_path_from, rsync_mod]))
with _mock_process(process_arg_checker):
self.replicator.replicate()
# The file should still exist
self.assertTrue(os.access(part_path, os.F_OK))
def test_delete_partition_with_handoff_delete_fail_in_other_region(self):
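        # rsync failures to the nodes in the other region should prevent the
        # handoff partition from being removed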
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
policy=POLICIES.legacy)
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
'wb')
f.write(b'1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '1', data_dir)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
ring = self.replicator.load_object_ring(POLICIES[0])
nodes = [node for node in
ring.get_part_nodes(1)
if node['ip'] not in _ips()]
process_arg_checker = []
for node in nodes:
rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], 1)
if node['region'] != 1:
                    # force the rsync calls to the other region to fail
ret_code = 1
else:
ret_code = 0
process_arg_checker.append(
(ret_code, '', ['rsync', whole_path_from, rsync_mod]))
with _mock_process(process_arg_checker):
self.replicator.replicate()
# The file should still exist
self.assertTrue(os.access(part_path, os.F_OK))
def test_delete_partition_override_params(self):
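        # the handoff partition is only removed when the overrides actually
        # select its device and partition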
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o',
policy=POLICIES.legacy)
mkdirs(df._datadir)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
self.replicator.replicate(override_devices=['sdb'])
self.assertTrue(os.access(part_path, os.F_OK))
self.replicator.replicate(override_partitions=[9])
self.assertTrue(os.access(part_path, os.F_OK))
self.replicator.replicate(override_devices=['sda'],
override_partitions=[1])
self.assertFalse(os.access(part_path, os.F_OK))
def _make_OSError(self, err):
return OSError(err, os.strerror(err))
def test_delete_partition_override_params_os_not_empty_error(self):
part_path = os.path.join(self.objects, '1')
with mock.patch('swift.obj.replicator.shutil.rmtree') as mockrmtree:
mockrmtree.side_effect = self._make_OSError(errno.ENOTEMPTY)
self.replicator.replicate(override_devices=['sda'],
override_partitions=[1],
override_policies=[0])
error_lines = self.replicator.logger.get_lines_for_level('error')
self.assertFalse(error_lines)
self.assertTrue(os.path.exists(part_path))
self.assertEqual([mock.call(part_path)], mockrmtree.call_args_list)
def test_delete_partition_ignores_os_no_entity_error(self):
part_path = os.path.join(self.objects, '1')
with mock.patch('swift.obj.replicator.shutil.rmtree') as mockrmtree:
mockrmtree.side_effect = self._make_OSError(errno.ENOENT)
self.replicator.replicate(override_devices=['sda'],
override_partitions=[1],
override_policies=[0])
error_lines = self.replicator.logger.get_lines_for_level('error')
self.assertFalse(error_lines)
self.assertTrue(os.path.exists(part_path))
self.assertEqual([mock.call(part_path)], mockrmtree.call_args_list)
def test_delete_partition_ignores_os_no_data_error(self):
part_path = os.path.join(self.objects, '1')
with mock.patch('swift.obj.replicator.shutil.rmtree') as mockrmtree:
mockrmtree.side_effect = self._make_OSError(errno.ENODATA)
self.replicator.replicate(override_devices=['sda'],
override_partitions=[1],
override_policies=[0])
error_lines = self.replicator.logger.get_lines_for_level('error')
self.assertFalse(error_lines)
self.assertTrue(os.path.exists(part_path))
self.assertEqual([mock.call(part_path)], mockrmtree.call_args_list)
def test_delete_policy_override_params(self):
df0 = self.df_mgr.get_diskfile('sda', '99', 'a', 'c', 'o',
policy=POLICIES.legacy)
df1 = self.df_mgr.get_diskfile('sda', '99', 'a', 'c', 'o',
policy=POLICIES[1])
mkdirs(df0._datadir)
mkdirs(df1._datadir)
pol0_part_path = os.path.join(self.objects, '99')
pol1_part_path = os.path.join(self.objects_1, '99')
# sanity checks
self.assertTrue(os.access(pol0_part_path, os.F_OK))
self.assertTrue(os.access(pol1_part_path, os.F_OK))
# a bogus policy index doesn't bother the replicator any more than a
# bogus device or partition does
self.replicator.run_once(policies='1,2,5')
self.assertFalse(os.access(pol1_part_path, os.F_OK))
self.assertTrue(os.access(pol0_part_path, os.F_OK))
# since we weren't operating on everything, but only a subset of
# storage policies, we didn't dump any recon stats.
self.assertFalse(os.path.exists(
os.path.join(self.recon_cache, RECON_OBJECT_FILE)))
def test_delete_partition_ssync(self):
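        # with ssync, the handoff object, suffix dir and partition dir are
        # removed progressively over successive replication passes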
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
policy=POLICIES.legacy)
mkdirs(df._datadir)
ts = normalize_timestamp(time.time())
f = open(os.path.join(df._datadir, ts + '.data'),
'wb')
f.write(b'0')
f.close()
ohash = hash_path('a', 'c', 'o')
whole_path_from = storage_directory(self.objects, 1, ohash)
suffix_dir_path = os.path.dirname(whole_path_from)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
self.call_nums = 0
self.conf['sync_method'] = 'ssync'
def _fake_ssync(node, job, suffixes, **kwargs):
success = True
ret_val = {ohash: ts}
if self.call_nums == 2:
                    # ssync should return (True, []) only when the second
                    # candidate node has not got the replica yet.
success = False
ret_val = {}
self.call_nums += 1
return success, ret_val
self.replicator.sync_method = _fake_ssync
self.replicator.replicate()
# The file should still exist
self.assertTrue(os.access(whole_path_from, os.F_OK))
self.assertTrue(os.access(suffix_dir_path, os.F_OK))
self.assertTrue(os.access(part_path, os.F_OK))
self.replicator.replicate()
# The file should be deleted at the second replicate call
self.assertFalse(os.access(whole_path_from, os.F_OK))
self.assertFalse(os.access(suffix_dir_path, os.F_OK))
self.assertTrue(os.access(part_path, os.F_OK))
self.replicator.replicate()
# The partition should be deleted at the third replicate call
self.assertFalse(os.access(whole_path_from, os.F_OK))
self.assertFalse(os.access(suffix_dir_path, os.F_OK))
self.assertFalse(os.access(part_path, os.F_OK))
del self.call_nums
def test_delete_partition_ssync_with_sync_failure(self):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
policy=POLICIES.legacy)
ts = normalize_timestamp(time.time())
mkdirs(df._datadir)
f = open(os.path.join(df._datadir, ts + '.data'), 'wb')
f.write(b'0')
f.close()
ohash = hash_path('a', 'c', 'o')
whole_path_from = storage_directory(self.objects, 1, ohash)
suffix_dir_path = os.path.dirname(whole_path_from)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
self.call_nums = 0
self.conf['sync_method'] = 'ssync'
            def _fake_ssync(node, job, suffixes, **kwargs):
success = False
ret_val = {}
if self.call_nums == 2:
                    # ssync should return (True, []) only when the second
                    # candidate node has not got the replica yet.
success = True
ret_val = {ohash: ts}
self.call_nums += 1
return success, ret_val
self.replicator.sync_method = _fake_ssync
self.replicator.replicate()
# The file should still exist
self.assertTrue(os.access(whole_path_from, os.F_OK))
self.assertTrue(os.access(suffix_dir_path, os.F_OK))
self.assertTrue(os.access(part_path, os.F_OK))
self.replicator.replicate()
# The file should still exist
self.assertTrue(os.access(whole_path_from, os.F_OK))
self.assertTrue(os.access(suffix_dir_path, os.F_OK))
self.assertTrue(os.access(part_path, os.F_OK))
self.replicator.replicate()
# The file should still exist
self.assertTrue(os.access(whole_path_from, os.F_OK))
self.assertTrue(os.access(suffix_dir_path, os.F_OK))
self.assertTrue(os.access(part_path, os.F_OK))
del self.call_nums
def test_delete_objs_ssync_only_when_in_sync(self):
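        # even though the sync jobs succeed, nothing is deleted locally
        # because the remote object check reports no objects in sync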
self.replicator.logger = debug_logger('test-replicator')
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
policy=POLICIES.legacy)
mkdirs(df._datadir)
ts = normalize_timestamp(time.time())
f = open(os.path.join(df._datadir, ts + '.data'), 'wb')
f.write(b'0')
f.close()
ohash = hash_path('a', 'c', 'o')
whole_path_from = storage_directory(self.objects, 1, ohash)
suffix_dir_path = os.path.dirname(whole_path_from)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
self.call_nums = 0
self.conf['sync_method'] = 'ssync'
in_sync_objs = {}
def _fake_ssync(node, job, suffixes, remote_check_objs=None):
self.call_nums += 1
if remote_check_objs is None:
# sync job
ret_val = {ohash: ts}
else:
ret_val = in_sync_objs
return True, ret_val
self.replicator.sync_method = _fake_ssync
self.replicator.replicate()
self.assertEqual(3, self.call_nums)
# The file should still exist
self.assertTrue(os.access(whole_path_from, os.F_OK))
self.assertTrue(os.access(suffix_dir_path, os.F_OK))
self.assertTrue(os.access(part_path, os.F_OK))
del self.call_nums
def test_delete_partition_ssync_with_cleanup_failure(self):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
self.replicator.logger = mock_logger = \
debug_logger('test-replicator')
df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o',
policy=POLICIES.legacy)
mkdirs(df._datadir)
ts = normalize_timestamp(time.time())
f = open(os.path.join(df._datadir, ts + '.data'), 'wb')
f.write(b'0')
f.close()
ohash = hash_path('a', 'c', 'o')
whole_path_from = storage_directory(self.objects, 1, ohash)
suffix_dir_path = os.path.dirname(whole_path_from)
part_path = os.path.join(self.objects, '1')
self.assertTrue(os.access(part_path, os.F_OK))
self.call_nums = 0
self.conf['sync_method'] = 'ssync'
def _fake_ssync(node, job, suffixes, **kwargs):
success = True
ret_val = {ohash: ts}
if self.call_nums == 2:
                    # ssync should return (True, []) only when the second
                    # candidate node has not got the replica yet.
success = False
ret_val = {}
self.call_nums += 1
return success, ret_val
rmdir_func = os.rmdir
def raise_exception_rmdir(exception_class, error_no):
instance = exception_class()
instance.errno = error_no
instance.strerror = os.strerror(error_no)
def func(directory, dir_fd=None):
if directory == suffix_dir_path:
raise instance
else:
rmdir_func(directory)
return func
self.replicator.sync_method = _fake_ssync
self.replicator.replicate()
# The file should still exist
self.assertTrue(os.access(whole_path_from, os.F_OK))
self.assertTrue(os.access(suffix_dir_path, os.F_OK))
self.assertTrue(os.access(part_path, os.F_OK))
# Fail with ENOENT
with mock.patch('os.rmdir',
raise_exception_rmdir(OSError, ENOENT)):
self.replicator.replicate()
self.assertFalse(mock_logger.get_lines_for_level('error'))
self.assertFalse(os.access(whole_path_from, os.F_OK))
self.assertTrue(os.access(suffix_dir_path, os.F_OK))
self.assertTrue(os.access(part_path, os.F_OK))
# Fail with ENOTEMPTY
with mock.patch('os.rmdir',
raise_exception_rmdir(OSError, ENOTEMPTY)):
self.replicator.replicate()
self.assertFalse(mock_logger.get_lines_for_level('error'))
self.assertFalse(os.access(whole_path_from, os.F_OK))
self.assertTrue(os.access(suffix_dir_path, os.F_OK))
self.assertTrue(os.access(part_path, os.F_OK))
# Fail with ENOTDIR
with mock.patch('os.rmdir',
raise_exception_rmdir(OSError, ENOTDIR)):
self.replicator.replicate()
self.assertEqual(mock_logger.get_lines_for_level('error'), [
'Unexpected error trying to cleanup suffix dir %r: ' %
os.path.dirname(df._datadir),
])
self.assertFalse(os.access(whole_path_from, os.F_OK))
self.assertTrue(os.access(suffix_dir_path, os.F_OK))
self.assertTrue(os.access(part_path, os.F_OK))
# Finally we can cleanup everything
self.replicator.replicate()
self.assertFalse(os.access(whole_path_from, os.F_OK))
self.assertFalse(os.access(suffix_dir_path, os.F_OK))
self.assertTrue(os.access(part_path, os.F_OK))
self.replicator.replicate()
self.assertFalse(os.access(whole_path_from, os.F_OK))
self.assertFalse(os.access(suffix_dir_path, os.F_OK))
self.assertFalse(os.access(part_path, os.F_OK))
def test_run_once_recover_from_failure(self):
conf = dict(swift_dir=self.testdir, devices=self.devices,
bind_ip=_ips()[0],
mount_check='false', timeout='300', stats_interval='1')
replicator = object_replicator.ObjectReplicator(conf)
was_connector = object_replicator.http_connect
try:
object_replicator.http_connect = mock_http_connect(200)
            # Write some files into partition '1' and run replicate; they
            # should be pushed to the other nodes and then the local partition
            # should get deleted.
cur_part = '1'
df = self.df_mgr.get_diskfile('sda', cur_part, 'a', 'c', 'o',
policy=POLICIES.legacy)
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
'wb')
f.write(b'1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, cur_part, data_dir)
ring = replicator.load_object_ring(POLICIES[0])
process_arg_checker = []
nodes = [node for node in
ring.get_part_nodes(int(cur_part))
if node['ip'] not in _ips()]
for node in nodes:
rsync_mod = '%s::object/sda/objects/%s' % (node['ip'],
cur_part)
process_arg_checker.append(
(0, '', ['rsync', whole_path_from, rsync_mod]))
self.assertTrue(os.access(os.path.join(self.objects,
'1', data_dir, ohash),
os.F_OK))
with _mock_process(process_arg_checker):
replicator.run_once()
self.assertFalse(process_errors)
for i, result in [('0', True), ('1', False),
('2', True), ('3', True)]:
self.assertEqual(os.access(
os.path.join(self.objects,
i, diskfile.HASH_FILE),
os.F_OK), result)
finally:
object_replicator.http_connect = was_connector
def test_run_once_recover_from_timeout(self):
# verify that replicator will pass over all policies' partitions even
# if a timeout occurs while replicating one partition to one node.
timeouts = [Timeout()]
def fake_get_hashes(df_mgr, device, partition, policy, **kwargs):
self.get_hash_count += 1
dev_path = df_mgr.get_dev_path(device)
part_path = os.path.join(dev_path, diskfile.get_data_dir(policy),
str(partition))
# Simulate a REPLICATE timeout by raising Timeout for second call
# to get_hashes (with recalculate suffixes) for a specific
# partition
if (timeouts and '/objects/' in part_path and
part_path.endswith('0') and 'recalculate' in kwargs):
raise timeouts.pop(0)
return 1, {'abc': 'def'}
# map partition_path -> [nodes]
sync_paths = collections.defaultdict(list)
def fake_sync(node, job, suffixes, *args, **kwargs):
sync_paths[job['path']].append(node)
return True, {}
conf = dict(swift_dir=self.testdir, devices=self.devices,
bind_ip=_ips()[0], # local dev has id=0
mount_check='false', timeout='300', stats_interval='1')
with mock.patch('swift.obj.diskfile.DiskFileManager._get_hashes',
fake_get_hashes):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
with mock.patch('swift.obj.replicator.dump_recon_cache'):
replicator = object_replicator.ObjectReplicator(
conf, logger=self.logger)
self.get_hash_count = 0
with mock.patch.object(replicator, 'sync', fake_sync):
replicator.run_once()
log_lines = replicator.logger.logger.get_lines_for_level('error')
self.assertIn("Error syncing with node:", log_lines[0])
self.assertFalse(log_lines[1:])
# setup creates 4 partitions; partition 1 does not map to local dev id
        # 0 so will be handled by update_deleted(); partitions 0, 2, 3 are
# handled by update() for each of two policies, so expect 6 paths to be
# sync'd
self.assertEqual(6, len(sync_paths))
# partition 3 has 2 nodes in remote region, only first node is sync'd.
# partition 0 in policy 0 has fake_get_hashes timeout before first
# sync, so only second node is sync'd.
# other partitions are sync'd to 2 nodes in same region.
expected_node_count = { # map path_end -> expected sync node count
'/objects/0': 1,
'/objects/1': 2,
'/objects/2': 2,
'/objects/3': 1,
'/objects-1/0': 2,
'/objects-1/1': 2,
'/objects-1/2': 2,
'/objects-1/3': 1
}
for path, nodes in sync_paths.items():
path_end = path[path.index('/objects'):]
self.assertEqual(expected_node_count[path_end], len(nodes),
'Expected %s but got %s for path %s' %
(expected_node_count[path_end], len(nodes), path))
# partitions 0 and 2 attempt 3 calls each per policy to get_hashes = 12
        # partition 3 attempts 2 calls per policy to get_hashes = 4
        # partition 1 doesn't get_hashes because of update_deleted
self.assertEqual(16, self.get_hash_count)
        # get_hashes was attempted 16 times but only 15 succeeded due to the
        # Timeout
suffix_hashes = sum(
call[0][1] for call in
replicator.logger.logger.statsd_client.calls['update_stats']
if call[0][0] == 'suffix.hashes')
self.assertEqual(15, suffix_hashes)
def test_run(self):
with _mock_process([(0, '')] * 100):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
self.replicator.replicate()
def test_run_withlog(self):
with _mock_process([(0, "stuff in log")] * 100):
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)):
self.replicator.replicate()
def test_sync_just_calls_sync_method(self):
self.replicator.sync_method = mock.MagicMock()
self.replicator.sync('node', 'job', 'suffixes')
self.replicator.sync_method.assert_called_once_with(
'node', 'job', 'suffixes')
@mock.patch('swift.obj.replicator.tpool.execute')
@mock.patch('swift.obj.replicator.http_connect', autospec=True)
@mock.patch('swift.obj.replicator._do_listdir')
def test_update(self, mock_do_listdir, mock_http, mock_tpool_execute):
def set_default(self):
self.replicator.suffix_count = 0
self.replicator.suffix_sync = 0
self.replicator.suffix_hash = 0
self.replicator.last_replication_count = 0
self.replicator._zero_stats()
self.replicator.partition_times = []
self.headers = {'Content-Length': '0',
'user-agent': 'object-replicator %s' % os.getpid()}
mock_tpool_execute.return_value = (0, {})
all_jobs = self.replicator.collect_jobs()
jobs = [job for job in all_jobs if not job['delete']]
mock_http.return_value = answer = mock.MagicMock()
answer.getresponse.return_value = resp = mock.MagicMock()
# Check incorrect http_connect with status 507 and
# count of attempts and call args
resp.status = 507
expected_listdir_calls = [
mock.call(int(job['partition']),
self.replicator.replication_cycle)
for job in jobs]
do_listdir_results = [False, False, True, False, True, False]
mock_do_listdir.side_effect = do_listdir_results
expected_tpool_calls = [
mock.call(self.replicator._df_router[job['policy']]._get_hashes,
job['device'], job['partition'], job['policy'],
do_listdir=do_listdir)
for job, do_listdir in zip(jobs, do_listdir_results)
]
for job in jobs:
set_default(self)
ring = job['policy'].object_ring
self.headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
self.replicator.update(job)
error_lines = self.logger.get_lines_for_level('error')
expected = []
error = '%s responded as unmounted'
# ... first the primaries
for node in job['nodes']:
node_str = utils.node_to_string(node, replication=True)
expected.append(error % node_str)
# ... then it will get handoffs
for node in job['policy'].object_ring.get_more_nodes(
int(job['partition'])):
node_str = utils.node_to_string(node, replication=True)
expected.append(error % node_str)
# ... and finally we get an error about running out of nodes
expected.append('Ran out of handoffs while replicating '
'partition %s of policy %d' %
(job['partition'], job['policy']))
self.assertEqual(expected, error_lines)
self.assertEqual(len(self.replicator.partition_times), 1)
self.assertEqual(mock_http.call_count, len(ring._devs) - 1)
reqs = []
for node in job['nodes']:
reqs.append(mock.call(node['ip'], node['port'], node['device'],
job['partition'], 'REPLICATE', '',
headers=self.headers))
if job['partition'] == '0':
self.assertEqual(self.replicator.suffix_hash, 0)
mock_http.assert_has_calls(reqs, any_order=True)
mock_http.reset_mock()
self.logger.clear()
mock_do_listdir.assert_has_calls(expected_listdir_calls)
mock_tpool_execute.assert_has_calls(expected_tpool_calls)
mock_do_listdir.side_effect = None
mock_do_listdir.return_value = False
# Check incorrect http_connect with status 400 != HTTP_OK
resp.status = 400
error = 'Invalid response %(resp)s from %(node)s'
for job in jobs:
set_default(self)
self.replicator.update(job)
# ... only the primaries
expected = [
error % {
"resp": 400,
"node": utils.node_to_string(node, replication=True)}
for node in job['nodes']]
self.assertEqual(expected,
self.logger.get_lines_for_level('error'))
self.assertEqual(len(self.replicator.partition_times), 1)
self.logger.clear()
# Check successful http_connection and exception with
# incorrect pickle.loads(resp.read())
resp.status = 200
resp.read.return_value = b'garbage'
expect = 'Error syncing with node: %s: '
for job in jobs:
set_default(self)
self.replicator.update(job)
# ... only the primaries
expected = [expect % utils.node_to_string(node, replication=True)
for node in job['nodes']]
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(expected, error_lines)
self.assertEqual(len(self.replicator.partition_times), 1)
self.logger.clear()
# Check successful http_connection and correct
# pickle.loads(resp.read()) for non local node
resp.status = 200
local_job = None
resp.read.return_value = pickle.dumps({})
for job in jobs:
set_default(self)
# limit local job to policy 0 for simplicity
if job['partition'] == '0' and int(job['policy']) == 0:
local_job = job.copy()
continue
self.replicator.update(job)
self.assertEqual([], self.logger.get_lines_for_level('error'))
self.assertEqual(len(self.replicator.partition_times), 1)
self.assertEqual(self.replicator.suffix_hash, 0)
self.assertEqual(self.replicator.suffix_sync, 0)
self.assertEqual(self.replicator.suffix_count, 0)
self.logger.clear()
# Check successful http_connect and sync for local node
mock_tpool_execute.return_value = (1, {'a83': 'ba47fd314242ec8c'
'7efb91f5d57336e4'})
resp.read.return_value = pickle.dumps({'a83': 'c130a2c17ed45102a'
'ada0f4eee69494ff'})
set_default(self)
self.replicator.sync = fake_func = \
mock.MagicMock(return_value=(True, []))
self.replicator.update(local_job)
reqs = []
for node in local_job['nodes']:
reqs.append(mock.call(node, local_job, ['a83']))
fake_func.assert_has_calls(reqs, any_order=True)
self.assertEqual(fake_func.call_count, 2)
stats = self.replicator.total_stats
self.assertEqual(stats.attempted, 1)
self.assertEqual(stats.suffix_sync, 2)
self.assertEqual(stats.suffix_hash, 1)
self.assertEqual(stats.suffix_count, 1)
self.assertEqual(stats.hashmatch, 0)
# Efficient Replication Case
set_default(self)
self.replicator.sync = fake_func = \
mock.MagicMock(return_value=(True, []))
all_jobs = self.replicator.collect_jobs()
job = None
for tmp in all_jobs:
if tmp['partition'] == '3':
job = tmp
break
# The candidate nodes to replicate (i.e. dev1 and dev3)
# belong to another region
self.replicator.update(job)
self.assertEqual(fake_func.call_count, 1)
stats = self.replicator.total_stats
self.assertEqual(stats.attempted, 1)
self.assertEqual(stats.suffix_sync, 1)
self.assertEqual(stats.suffix_hash, 1)
self.assertEqual(stats.suffix_count, 1)
self.assertEqual(stats.hashmatch, 0)
mock_http.reset_mock()
self.logger.clear()
# test for replication params on policy 0 only
repl_job = local_job.copy()
for node in repl_job['nodes']:
node['replication_ip'] = '127.0.0.11'
node['replication_port'] = '6011'
set_default(self)
# with only one set of headers make sure we specify index 0 here
# as otherwise it may be different from earlier tests
self.headers['X-Backend-Storage-Policy-Index'] = 0
self.replicator.update(repl_job)
reqs = []
for node in repl_job['nodes']:
reqs.append(mock.call(node['replication_ip'],
node['replication_port'], node['device'],
repl_job['partition'], 'REPLICATE',
'', headers=self.headers))
mock_http.assert_has_calls(reqs, any_order=True)
@mock.patch('swift.obj.replicator.tpool.execute')
@mock.patch('swift.obj.replicator.http_connect', autospec=True)
@mock.patch('swift.obj.replicator._do_listdir')
def test_update_local_hash_changes_during_replication(
self, mock_do_listdir, mock_http, mock_tpool_execute):
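        # if the recalculated local suffix hash ends up matching the remote
        # hash, no suffixes should be synced even though the initial hashes
        # differed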
mock_http.return_value = answer = mock.MagicMock()
answer.getresponse.return_value = resp = mock.MagicMock()
resp.status = 200
resp.read.return_value = pickle.dumps({
'a83': 'c130a2c17ed45102aada0f4eee69494ff'})
self.replicator.sync = fake_sync = \
mock.MagicMock(return_value=(True, []))
local_job = [
job for job in self.replicator.collect_jobs()
if not job['delete']
and job['partition'] == '0' and int(job['policy']) == 0
][0]
mock_tpool_execute.side_effect = [
(1, {'a83': 'ba47fd314242ec8c7efb91f5d57336e4'}),
(1, {'a83': 'c130a2c17ed45102aada0f4eee69494ff'}),
]
self.replicator.update(local_job)
self.assertEqual(fake_sync.call_count, 0)
self.assertEqual(mock_http.call_count, 2)
stats = self.replicator.total_stats
self.assertEqual(stats.attempted, 1)
self.assertEqual(stats.suffix_sync, 0)
self.assertEqual(stats.suffix_hash, 1)
self.assertEqual(stats.suffix_count, 1)
self.assertEqual(stats.hashmatch, 2)
def test_rsync_compress_different_region(self):
self.assertEqual(self.replicator.sync_method, self.replicator.rsync)
jobs = self.replicator.collect_jobs()
_m_rsync = mock.Mock(return_value=0)
_m_os_path_exists = mock.Mock(return_value=True)
expected_reqs = []
with mock.patch.object(self.replicator, '_rsync', _m_rsync), \
mock.patch('os.path.exists', _m_os_path_exists), \
mocked_http_conn(
*[200] * 2 * sum(len(job['nodes']) for job in jobs),
body=pickle.dumps('{}')) as request_log:
for job in jobs:
self.assertTrue('region' in job)
for node in job['nodes']:
for rsync_compress in (True, False):
expected_reqs.append((
'REPLICATE', node['ip'],
'/%s/%s/fake_suffix' % (
node['device'], job['partition']),
))
self.replicator.rsync_compress = rsync_compress
ret = self.replicator.sync(node, job,
['fake_suffix'])
self.assertTrue(ret)
if node['region'] != job['region']:
if rsync_compress:
# --compress arg should be passed to rsync
# binary only when rsync_compress option is
# enabled AND destination node is in a
# different region
self.assertTrue('--compress' in
_m_rsync.call_args[0][0])
else:
self.assertFalse('--compress' in
_m_rsync.call_args[0][0])
else:
self.assertFalse('--compress' in
_m_rsync.call_args[0][0])
self.assertEqual(
_m_os_path_exists.call_args_list[-1][0][0],
os.path.join(job['path'], 'fake_suffix'))
self.assertEqual(
_m_os_path_exists.call_args_list[-2][0][0],
os.path.join(job['path']))
self.assertEqual(expected_reqs, [
(r['method'], r['ip'], r['path']) for r in request_log.requests])
def test_rsync_failure_logging(self):
with mock.patch('swift.obj.replicator.subprocess.Popen') as mock_popen:
mock_popen.return_value.stdout = io.BytesIO(b'\n'.join([
b'',
b'cd+++++++++ suf',
b'cd+++++++++ suf/hash1',
b'<f+++++++++ suf/hash1/1637956993.28907.data',
b'',
b'cd+++++++++ suf/hash2',
b'<f+++++++++ suf/hash2/1615174984.55017.data',
b'',
b'cd+++++++++ suf/hash3',
b'<f+++++++++ suf/hash3/1616276756.37760.data',
b'<f+++++++++ suf/hash3/1637954870.98055.meta',
b'',
b'Oh no, some error!',
]))
mock_popen.return_value.wait.return_value = 5
self.assertEqual(5, self.replicator._rsync([
'rsync', '--recursive', '--whole-file', '--human-readable',
'--xattrs', '--itemize-changes', '--ignore-existing',
'--timeout=30', '--contimeout=30', '--bwlimit=100M',
'--exclude=rsync-tempfile-pattern',
'/srv/node/d1/objects/part/suf',
'192.168.50.30::object/d8/objects/241']))
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(error_lines[:5], [
'<f+++++++++ suf/hash1/1637956993.28907.data',
'<f+++++++++ suf/hash2/1615174984.55017.data',
'<f+++++++++ suf/hash3/1616276756.37760.data',
'<f+++++++++ suf/hash3/1637954870.98055.meta',
'Oh no, some error!',
])
expected_start = "Bad rsync return code: 5 <- ['rsync', '--recursive'"
self.assertEqual(error_lines[5][:len(expected_start)], expected_start,
'Expected %r to start with %r' % (error_lines[5],
expected_start))
self.assertFalse(error_lines[6:])
self.assertFalse(self.logger.get_lines_for_level('info'))
self.assertFalse(self.logger.get_lines_for_level('debug'))
def test_rsync_failure_logging_no_transfer(self):
with mock.patch('swift.obj.replicator.subprocess.Popen') as mock_popen:
mock_popen.return_value.stdout = io.BytesIO(b'\n'.join([
b'',
b'cd+++++++++ suf',
b'cd+++++++++ suf/hash1',
b'<f+++++++++ suf/hash1/1637956993.28907.data',
b'',
b'cd+++++++++ suf/hash2',
b'<f+++++++++ suf/hash2/1615174984.55017.data',
b'',
b'cd+++++++++ suf/hash3',
b'<f+++++++++ suf/hash3/1616276756.37760.data',
b'<f+++++++++ suf/hash3/1637954870.98055.meta',
b'',
b'Oh no, some error!',
]))
mock_popen.return_value.wait.return_value = 5
self.replicator.log_rsync_transfers = False
self.assertEqual(5, self.replicator._rsync([
'rsync', '--recursive', '--whole-file', '--human-readable',
'--xattrs', '--itemize-changes', '--ignore-existing',
'--timeout=30', '--contimeout=30', '--bwlimit=100M',
'--exclude=rsync-tempfile-pattern',
'/srv/node/d1/objects/part/suf',
'192.168.50.30::object/d8/objects/241']))
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(error_lines[0], 'Oh no, some error!')
expected_start = "Bad rsync return code: 5 <- ['rsync', '--recursive'"
self.assertEqual(error_lines[1][:len(expected_start)], expected_start,
'Expected %r to start with %r' % (error_lines[1],
expected_start))
self.assertFalse(error_lines[2:])
self.assertFalse(self.logger.get_lines_for_level('info'))
self.assertFalse(self.logger.get_lines_for_level('debug'))
def test_rsync_success_logging(self):
with mock.patch(
'swift.obj.replicator.subprocess.Popen') as mock_popen, \
mock.patch('time.time', side_effect=[123.4, 123.5]):
mock_popen.return_value.stdout = io.BytesIO(b'\n'.join([
b'',
b'cd+++++++++ suf',
b'cd+++++++++ suf/hash1',
b'<f+++++++++ suf/hash1/1637956993.28907.data',
b'',
b'cd+++++++++ suf/hash2',
b'<f+++++++++ suf/hash2/1615174984.55017.data',
b'',
b'cd+++++++++ suf/hash3',
b'<f+++++++++ suf/hash3/1616276756.37760.data',
b'<f+++++++++ suf/hash3/1637954870.98055.meta',
b'',
b'Yay! It worked!',
]))
mock_popen.return_value.wait.return_value = 0
self.assertEqual(0, self.replicator._rsync([
'rsync', '--recursive', '--whole-file', '--human-readable',
'--xattrs', '--itemize-changes', '--ignore-existing',
'--timeout=30', '--contimeout=30', '--bwlimit=100M',
'--exclude=rsync-tempfile-pattern',
'/srv/node/d1/objects/part/suf',
'192.168.50.30::object/d8/objects/241']))
self.assertFalse(self.logger.get_lines_for_level('error'))
debug_lines = self.logger.get_lines_for_level('debug')
self.assertEqual(debug_lines, [
'<f+++++++++ suf/hash1/1637956993.28907.data',
'<f+++++++++ suf/hash2/1615174984.55017.data',
'<f+++++++++ suf/hash3/1616276756.37760.data',
'<f+++++++++ suf/hash3/1637954870.98055.meta',
'Yay! It worked!',
])
info_lines = self.logger.get_lines_for_level('info')
self.assertEqual(info_lines, [
'Successful rsync of /srv/node/d1/objects/part/... to '
'192.168.50.30::object/d8/objects/241 (0.100)'])
def test_rsync_success_logging_no_transfer(self):
with mock.patch(
'swift.obj.replicator.subprocess.Popen') as mock_popen, \
mock.patch('time.time', side_effect=[123.4, 123.5]):
mock_popen.return_value.stdout = io.BytesIO(b'\n'.join([
b'',
b'cd+++++++++ sf1',
b'cd+++++++++ sf1/hash1',
b'<f+++++++++ sf1/hash1/1637956993.28907.data',
b'',
b'cd+++++++++ sf1/hash2',
b'<f+++++++++ sf1/hash2/1615174984.55017.data',
b'',
b'cd+++++++++ sf2/hash3',
b'<f+++++++++ sf2/hash3/1616276756.37760.data',
b'<f+++++++++ sf2/hash3/1637954870.98055.meta',
b'',
b'Yay! It worked!',
]))
mock_popen.return_value.wait.return_value = 0
self.replicator.log_rsync_transfers = False
self.assertEqual(0, self.replicator._rsync([
'rsync', '--recursive', '--whole-file', '--human-readable',
'--xattrs', '--itemize-changes', '--ignore-existing',
'--timeout=30', '--contimeout=30', '--bwlimit=100M',
'--exclude=rsync-tempfile-pattern',
'/srv/node/d1/objects/part/sf1',
'/srv/node/d1/objects/part/sf2',
'192.168.50.30::object/d8/objects/241']))
self.assertFalse(self.logger.get_lines_for_level('error'))
debug_lines = self.logger.get_lines_for_level('debug')
self.assertEqual(debug_lines, ['Yay! It worked!'])
info_lines = self.logger.get_lines_for_level('info')
self.assertEqual(info_lines, [
'Successful rsync of /srv/node/d1/objects/part/... to '
'192.168.50.30::object/d8/objects/241 (0.100)'])
def test_do_listdir(self):
        # Test that do_listdir is enabled for every 10th partition to rehash.
        # The dict key is the number of partitions in the job; the list
        # entries are the expected number of rehashed partitions per run.
test_data = {
9: [1, 0, 1, 1, 1, 1, 1, 1, 1, 1],
29: [3, 2, 3, 3, 3, 3, 3, 3, 3, 3],
111: [12, 11, 11, 11, 11, 11, 11, 11, 11, 11]}
for partitions, expected in test_data.items():
seen = []
for phase in range(10):
invalidated = 0
for partition in range(partitions):
if object_replicator._do_listdir(partition, phase):
seen.append(partition)
invalidated += 1
# Every 10th partition is seen after each phase
self.assertEqual(expected[phase], invalidated)
# After 10 cycles every partition is seen exactly once
self.assertEqual(sorted(range(partitions)), sorted(seen))
def test_update_deleted_partition_lock_timeout(self):
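        # a PartitionLockTimeout while handling a handoff job should just be
        # logged at info without raising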
self.replicator.handoffs_remaining = 0
jobs = self.replicator.collect_jobs()
delete_jobs = [j for j in jobs if j['delete']]
delete_jobs.sort(key=lambda j: j['policy'])
job = delete_jobs[0]
df_mgr = self.replicator._df_router[job['policy']]
with mock.patch.object(df_mgr, 'partition_lock',
side_effect=PartitionLockTimeout):
self.replicator.update_deleted(job)
logs = self.logger.get_lines_for_level('info')
self.assertEqual(['Unable to lock handoff partition 1 for '
'replication on device sda policy 0'], logs)
def test_replicate_skipped_partpower_increase(self):
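        # no jobs should be attempted while next_part_power is set in the
        # ring, i.e. while a partition power increase is in progress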
_create_test_rings(self.testdir, next_part_power=4)
self.replicator.get_local_devices() # refresh rings
self.replicator.replicate()
self.assertEqual(0, self.replicator.job_count)
self.assertEqual(0, self.replicator.total_stats.attempted)
warnings = self.logger.get_lines_for_level('warning')
self.assertIn(
"next_part_power set in policy 'one'. Skipping", warnings)
def test_replicate_rsync_timeout(self):
cur_part = '0'
df = self.df_mgr.get_diskfile('sda', cur_part, 'a', 'c', 'o',
policy=POLICIES[0])
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
'wb')
f.write(b'1234567890')
f.close()
mock_procs = []
def new_mock(*a, **kw):
proc = MockHungProcess()
mock_procs.append(proc)
return proc
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)), \
mock.patch.object(self.replicator, 'rsync_timeout', 0.01), \
mock.patch('eventlet.green.subprocess.Popen', new_mock):
self.replicator.rsync_error_log_line_length = 40
self.replicator.run_once()
for proc in mock_procs:
self.assertEqual(proc._calls, [
('wait', 'running'),
('kill', 'running'),
('wait', 'killed'),
])
self.assertEqual(len(mock_procs), 2)
error_lines = self.replicator.logger.get_lines_for_level('error')
# verify logs are truncated to rsync_error_log_line_length
self.assertEqual(["Killing long-running rsync after 0s: ['r"] * 2,
error_lines)
def test_replicate_rsync_timeout_wedged(self):
cur_part = '0'
df = self.df_mgr.get_diskfile('sda', cur_part, 'a', 'c', 'o',
policy=POLICIES[0])
mkdirs(df._datadir)
f = open(os.path.join(df._datadir,
normalize_timestamp(time.time()) + '.data'),
'wb')
f.write(b'1234567890')
f.close()
mock_procs = []
def new_mock(*a, **kw):
proc = MockHungProcess(polls_needed=2)
mock_procs.append(proc)
return proc
with mock.patch('swift.obj.replicator.http_connect',
mock_http_connect(200)), \
mock.patch.object(self.replicator, 'rsync_timeout', 0.01), \
mock.patch('eventlet.green.subprocess.Popen', new_mock):
self.replicator.run_once()
for proc in mock_procs:
self.assertEqual(proc._calls, [
('wait', 'running'),
('kill', 'running'),
('wait', 'killed'),
('poll', 'killed'),
('poll', 'killed'),
])
self.assertEqual(len(mock_procs), 2)
def test_limit_rsync_log(self):
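        # log lines longer than rsync_error_log_line_length are truncated;
        # a limit of None or 0 disables truncation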
def do_test(length_limit, log_line, expected):
self.replicator.rsync_error_log_line_length = length_limit
result = self.replicator._limit_rsync_log(log_line)
self.assertEqual(result, expected)
tests = [{'length_limit': 20,
'log_line': 'a' * 20,
'expected': 'a' * 20},
{'length_limit': 20,
'log_line': 'a' * 19,
'expected': 'a' * 19},
{'length_limit': 20,
'log_line': 'a' * 21,
'expected': 'a' * 20},
{'length_limit': None,
'log_line': 'a' * 50,
'expected': 'a' * 50},
{'length_limit': 0,
'log_line': 'a' * 50,
'expected': 'a' * 50}]
for params in tests:
do_test(**params)
@patch_policies([StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', True)])
class TestMultiProcessReplicator(unittest.TestCase):
def setUp(self):
# recon cache path
self.recon_cache = tempfile.mkdtemp()
rmtree(self.recon_cache, ignore_errors=1)
os.mkdir(self.recon_cache)
self.recon_file = os.path.join(self.recon_cache, RECON_OBJECT_FILE)
bind_port = 6200
# Set up some rings
self.testdir = tempfile.mkdtemp()
_create_test_rings(self.testdir, devs=[
{'id': 0, 'device': 'sda', 'zone': 0,
'region': 1, 'ip': '127.0.0.1', 'port': bind_port},
{'id': 1, 'device': 'sdb', 'zone': 0,
'region': 1, 'ip': '127.0.0.1', 'port': bind_port},
{'id': 2, 'device': 'sdc', 'zone': 0,
'region': 1, 'ip': '127.0.0.1', 'port': bind_port},
{'id': 3, 'device': 'sdd', 'zone': 0,
'region': 1, 'ip': '127.0.0.1', 'port': bind_port},
{'id': 4, 'device': 'sde', 'zone': 0,
'region': 1, 'ip': '127.0.0.1', 'port': bind_port},
{'id': 100, 'device': 'notme0', 'zone': 0,
'region': 1, 'ip': '127.99.99.99', 'port': bind_port}])
self.logger = debug_logger('test-replicator')
self.conf = dict(
bind_ip='127.0.0.1', bind_port=bind_port,
swift_dir=self.testdir,
mount_check='false', recon_cache_path=self.recon_cache,
timeout='300', stats_interval='1', sync_method='rsync')
self.replicator = object_replicator.ObjectReplicator(
self.conf, logger=self.logger)
def tearDown(self):
self.assertFalse(process_errors)
rmtree(self.testdir, ignore_errors=1)
rmtree(self.recon_cache, ignore_errors=1)
def fake_replicate(self, override_devices, **kw):
# Faked-out replicate() method. Just updates the stats, but doesn't
# do any work.
for device in override_devices:
stats = self.replicator.stats_for_dev[device]
if device == 'sda':
stats.attempted = 1
stats.success = 10
stats.failure = 100
stats.hashmatch = 1000
stats.rsync = 10000
stats.remove = 100000
stats.suffix_count = 1000000
stats.suffix_hash = 10000000
stats.suffix_sync = 100000000
stats.failure_nodes = {
'10.1.1.1': {'d11': 1}}
elif device == 'sdb':
stats.attempted = 2
stats.success = 20
stats.failure = 200
stats.hashmatch = 2000
stats.rsync = 20000
stats.remove = 200000
stats.suffix_count = 2000000
stats.suffix_hash = 20000000
stats.suffix_sync = 200000000
stats.failure_nodes = {
'10.2.2.2': {'d22': 2}}
elif device == 'sdc':
stats.attempted = 3
stats.success = 30
stats.failure = 300
stats.hashmatch = 3000
stats.rsync = 30000
stats.remove = 300000
stats.suffix_count = 3000000
stats.suffix_hash = 30000000
stats.suffix_sync = 300000000
stats.failure_nodes = {
'10.3.3.3': {'d33': 3}}
elif device == 'sdd':
stats.attempted = 4
stats.success = 40
stats.failure = 400
stats.hashmatch = 4000
stats.rsync = 40000
stats.remove = 400000
stats.suffix_count = 4000000
stats.suffix_hash = 40000000
stats.suffix_sync = 400000000
stats.failure_nodes = {
'10.4.4.4': {'d44': 4}}
elif device == 'sde':
stats.attempted = 5
stats.success = 50
stats.failure = 500
stats.hashmatch = 5000
stats.rsync = 50000
stats.remove = 500000
stats.suffix_count = 5000000
stats.suffix_hash = 50000000
stats.suffix_sync = 500000000
stats.failure_nodes = {
'10.5.5.5': {'d55': 5}}
else:
raise Exception("mock can't handle %r" % device)
def test_no_multiprocessing(self):
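        # replicator_workers = 0 means no per-worker argument sets are
        # generated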
self.replicator.replicator_workers = 0
self.assertEqual(self.replicator.get_worker_args(), [])
def test_device_distribution(self):
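        # five local devices are split round-robin between the two workers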
self.replicator.replicator_workers = 2
self.assertEqual(self.replicator.get_worker_args(), [{
'override_devices': ['sda', 'sdc', 'sde'],
'override_partitions': [],
'override_policies': [],
'have_overrides': False,
'multiprocess_worker_index': 0,
}, {
'override_devices': ['sdb', 'sdd'],
'override_partitions': [],
'override_policies': [],
'have_overrides': False,
'multiprocess_worker_index': 1,
}])
def test_override_policies(self):
self.replicator.replicator_workers = 2
args = self.replicator.get_worker_args(policies="3,5,7", once=True)
self.assertEqual(args, [{
'override_devices': ['sda', 'sdc', 'sde'],
'override_partitions': [],
'override_policies': [3, 5, 7],
'have_overrides': True,
'multiprocess_worker_index': 0,
}, {
'override_devices': ['sdb', 'sdd'],
'override_partitions': [],
'override_policies': [3, 5, 7],
'have_overrides': True,
'multiprocess_worker_index': 1,
}])
# override policies don't apply in run-forever mode
args = self.replicator.get_worker_args(policies="3,5,7", once=False)
self.assertEqual(args, [{
'override_devices': ['sda', 'sdc', 'sde'],
'override_partitions': [],
'override_policies': [],
'have_overrides': False,
'multiprocess_worker_index': 0,
}, {
'override_devices': ['sdb', 'sdd'],
'override_partitions': [],
'override_policies': [],
'have_overrides': False,
'multiprocess_worker_index': 1,
}])
def test_more_workers_than_disks(self):
self.replicator.replicator_workers = 999
self.assertEqual(self.replicator.get_worker_args(), [{
'override_devices': ['sda'],
'override_partitions': [],
'override_policies': [],
'have_overrides': False,
'multiprocess_worker_index': 0,
}, {
'override_devices': ['sdb'],
'override_partitions': [],
'override_policies': [],
'have_overrides': False,
'multiprocess_worker_index': 1,
}, {
'override_devices': ['sdc'],
'override_partitions': [],
'override_policies': [],
'have_overrides': False,
'multiprocess_worker_index': 2,
}, {
'override_devices': ['sdd'],
'override_partitions': [],
'override_policies': [],
'have_overrides': False,
'multiprocess_worker_index': 3,
}, {
'override_devices': ['sde'],
'override_partitions': [],
'override_policies': [],
'have_overrides': False,
'multiprocess_worker_index': 4,
}])
# Remember how many workers we actually have so that the log-line
# prefixes are reasonable. Otherwise, we'd have five workers, each
# logging lines starting with things like "[worker X/999 pid=P]"
# despite there being only five.
self.assertEqual(self.replicator.replicator_workers, 5)
def test_command_line_overrides(self):
self.replicator.replicator_workers = 2
args = self.replicator.get_worker_args(
devices="sda,sdc,sdd", partitions="12,34,56", once=True)
self.assertEqual(args, [{
'override_devices': ['sda', 'sdd'],
'override_partitions': [12, 34, 56],
'override_policies': [],
'have_overrides': True,
'multiprocess_worker_index': 0,
}, {
'override_devices': ['sdc'],
'override_partitions': [12, 34, 56],
'override_policies': [],
'have_overrides': True,
'multiprocess_worker_index': 1,
}])
args = self.replicator.get_worker_args(
devices="sda,sdc,sdd", once=True)
self.assertEqual(args, [{
'override_devices': ['sda', 'sdd'],
'override_partitions': [],
'override_policies': [],
'have_overrides': True,
'multiprocess_worker_index': 0,
}, {
'override_devices': ['sdc'],
'override_partitions': [],
'override_policies': [],
'have_overrides': True,
'multiprocess_worker_index': 1,
}])
# no overrides apply in run-forever mode
args = self.replicator.get_worker_args(
devices="sda,sdc,sdd", partitions="12,34,56", once=False)
self.assertEqual(args, [{
'override_devices': ['sda', 'sdc', 'sde'],
'override_partitions': [],
'override_policies': [],
'have_overrides': False,
'multiprocess_worker_index': 0,
}, {
'override_devices': ['sdb', 'sdd'],
'override_partitions': [],
'override_policies': [],
'have_overrides': False,
'multiprocess_worker_index': 1,
}])
def test_worker_logging(self):
self.replicator.replicator_workers = 3
def log_some_stuff(*a, **kw):
self.replicator.logger.debug("debug message")
self.replicator.logger.info("info message")
self.replicator.logger.warning("warning message")
self.replicator.logger.error("error message")
with mock.patch.object(self.replicator, 'replicate', log_some_stuff), \
mock.patch("os.getpid", lambda: 8804):
self.replicator.get_worker_args()
self.replicator.run_once(multiprocess_worker_index=0,
override_devices=['sda', 'sdb'])
prefix = "[worker 1/3 pid=8804] "
for level, lines in self.logger.logger.all_log_lines().items():
for line in lines:
self.assertTrue(
line.startswith(prefix),
"%r doesn't start with %r (level %s)" % (
line, prefix, level))
def test_recon_run_once(self):
self.replicator.replicator_workers = 3
the_time = [1521680000]
def mock_time():
rv = the_time[0]
the_time[0] += 120
return rv
# Simulate a couple child processes
with mock.patch.object(self.replicator, 'replicate',
self.fake_replicate), \
mock.patch('time.time', mock_time):
self.replicator.get_worker_args()
self.replicator.run_once(multiprocess_worker_index=0,
override_devices=['sda', 'sdb'])
self.replicator.run_once(multiprocess_worker_index=1,
override_devices=['sdc'])
self.replicator.run_once(multiprocess_worker_index=2,
override_devices=['sdd', 'sde'])
with open(self.recon_file) as fh:
recon_data = json.load(fh)
self.assertIn('object_replication_per_disk', recon_data)
self.assertIn('sda', recon_data['object_replication_per_disk'])
self.assertIn('sdb', recon_data['object_replication_per_disk'])
self.assertIn('sdc', recon_data['object_replication_per_disk'])
self.assertIn('sdd', recon_data['object_replication_per_disk'])
self.assertIn('sde', recon_data['object_replication_per_disk'])
sda = recon_data['object_replication_per_disk']['sda']
# Spot-check a couple of fields
self.assertEqual(sda['replication_stats']['attempted'], 1)
self.assertEqual(sda['replication_stats']['success'], 10)
self.assertEqual(sda['object_replication_time'], 2) # minutes
self.assertEqual(sda['object_replication_last'], 1521680120)
# Aggregate the workers' recon updates
self.replicator.post_multiprocess_run()
with open(self.recon_file) as fh:
recon_data = json.load(fh)
self.assertEqual(recon_data['replication_stats']['attempted'], 15)
self.assertEqual(recon_data['replication_stats']['failure'], 1500)
self.assertEqual(recon_data['replication_stats']['hashmatch'], 15000)
self.assertEqual(recon_data['replication_stats']['remove'], 1500000)
self.assertEqual(recon_data['replication_stats']['rsync'], 150000)
self.assertEqual(recon_data['replication_stats']['success'], 150)
self.assertEqual(recon_data['replication_stats']['suffix_count'],
15000000)
self.assertEqual(recon_data['replication_stats']['suffix_hash'],
150000000)
self.assertEqual(recon_data['replication_stats']['suffix_sync'],
1500000000)
self.assertEqual(recon_data['replication_stats']['failure_nodes'], {
'10.1.1.1': {'d11': 1},
'10.2.2.2': {'d22': 2},
'10.3.3.3': {'d33': 3},
'10.4.4.4': {'d44': 4},
'10.5.5.5': {'d55': 5},
})
self.assertEqual(recon_data['object_replication_time'], 2) # minutes
self.assertEqual(recon_data['object_replication_last'], 1521680120)
def test_recon_skipped_with_overrides(self):
self.replicator.replicator_workers = 3
the_time = [1521680000]
def mock_time():
rv = the_time[0]
the_time[0] += 120
return rv
with mock.patch.object(self.replicator, 'replicate',
self.fake_replicate), \
mock.patch('time.time', mock_time):
self.replicator.get_worker_args()
self.replicator.run_once(multiprocess_worker_index=0,
have_overrides=True,
override_devices=['sda', 'sdb'])
self.assertFalse(os.path.exists(self.recon_file))
# have_overrides=False makes us get recon stats
with mock.patch.object(self.replicator, 'replicate',
self.fake_replicate), \
mock.patch('time.time', mock_time):
self.replicator.get_worker_args()
self.replicator.run_once(multiprocess_worker_index=0,
have_overrides=False,
override_devices=['sda', 'sdb'])
with open(self.recon_file) as fh:
recon_data = json.load(fh)
self.assertIn('sda', recon_data['object_replication_per_disk'])
def test_recon_run_forever(self):
the_time = [1521521521.52152]
def mock_time():
rv = the_time[0]
the_time[0] += 120
return rv
self.replicator.replicator_workers = 2
self.replicator._next_rcache_update = the_time[0]
# One worker has finished a pass, the other hasn't.
with mock.patch.object(self.replicator, 'replicate',
self.fake_replicate), \
mock.patch('time.time', mock_time):
self.replicator.get_worker_args()
# Yes, this says run_once, but this is only to populate
# object.recon with some stats. The real test is for the
# aggregation.
self.replicator.run_once(multiprocess_worker_index=0,
override_devices=['sda', 'sdb', 'sdc'])
# This will not produce aggregate stats since not every device has
# finished a pass.
the_time[0] += self.replicator.stats_interval
with mock.patch('time.time', mock_time):
rv = self.replicator.is_healthy()
self.assertTrue(rv)
with open(self.recon_file) as fh:
recon_data = json.load(fh)
self.assertNotIn('replication_stats', recon_data)
# Now all the local devices have completed a replication pass, so we
# will produce aggregate stats.
with mock.patch.object(self.replicator, 'replicate',
self.fake_replicate), \
mock.patch('time.time', mock_time):
self.replicator.get_worker_args()
self.replicator.run_once(multiprocess_worker_index=1,
override_devices=['sdd', 'sde'])
the_time[0] += self.replicator.stats_interval
with mock.patch('time.time', mock_time):
rv = self.replicator.is_healthy()
self.assertTrue(rv)
with open(self.recon_file) as fh:
recon_data = json.load(fh)
self.assertIn('replication_stats', recon_data)
# no need to exhaustively check every sum
self.assertEqual(recon_data['replication_stats']['attempted'], 15)
self.assertEqual(recon_data['replication_stats']['success'], 150)
self.assertEqual(
recon_data['replication_last'],
min(pd['replication_last']
for pd in recon_data['object_replication_per_disk'].values()))
class TestReplicatorStats(unittest.TestCase):
def test_to_recon(self):
st = object_replicator.Stats(
attempted=1, failure=2, hashmatch=3, remove=4,
rsync=5, success=7,
suffix_count=8, suffix_hash=9, suffix_sync=10,
failure_nodes={'10.1.2.3': {'sda': 100, 'sdb': 200}})
# This is what appears in the recon dump
self.assertEqual(st.to_recon(), {
'attempted': 1,
'failure': 2,
'hashmatch': 3,
'remove': 4,
'rsync': 5,
'success': 7,
'suffix_count': 8,
'suffix_hash': 9,
'suffix_sync': 10,
'failure_nodes': {'10.1.2.3': {'sda': 100, 'sdb': 200}},
})
def test_recon_roundtrip(self):
before = object_replicator.Stats(
attempted=1, failure=2, hashmatch=3, remove=4,
rsync=5, success=7,
suffix_count=8, suffix_hash=9, suffix_sync=10,
failure_nodes={'10.1.2.3': {'sda': 100, 'sdb': 200}})
after = object_replicator.Stats.from_recon(before.to_recon())
self.assertEqual(after.attempted, before.attempted)
self.assertEqual(after.failure, before.failure)
self.assertEqual(after.hashmatch, before.hashmatch)
self.assertEqual(after.remove, before.remove)
self.assertEqual(after.rsync, before.rsync)
self.assertEqual(after.success, before.success)
self.assertEqual(after.suffix_count, before.suffix_count)
self.assertEqual(after.suffix_hash, before.suffix_hash)
self.assertEqual(after.suffix_sync, before.suffix_sync)
self.assertEqual(after.failure_nodes, before.failure_nodes)
def test_from_recon_skips_extra_fields(self):
# If another attribute ever sneaks its way in, we should ignore it.
# This will make aborted upgrades a little less painful for
# operators.
recon_dict = {'attempted': 1, 'failure': 2, 'hashmatch': 3,
'spices': 5, 'treasures': 8}
stats = object_replicator.Stats.from_recon(recon_dict)
self.assertEqual(stats.attempted, 1)
self.assertEqual(stats.failure, 2)
self.assertEqual(stats.hashmatch, 3)
# We don't gain attributes just because they're in object.recon.
self.assertFalse(hasattr(stats, 'spices'))
self.assertFalse(hasattr(stats, 'treasures'))
def test_add_failure_stats(self):
st = object_replicator.Stats()
st.add_failure_stats([('10.1.1.1', 'd10'), ('10.1.1.1', 'd11')])
st.add_failure_stats([('10.1.1.1', 'd10')])
st.add_failure_stats([('10.1.1.1', 'd12'), ('10.2.2.2', 'd20'),
('10.2.2.2', 'd21'), ('10.2.2.2', 'd21'),
('10.2.2.2', 'd21')])
self.assertEqual(st.failure, 8)
as_dict = st.to_recon()
self.assertEqual(as_dict['failure_nodes'], {
'10.1.1.1': {
'd10': 2,
'd11': 1,
'd12': 1,
},
'10.2.2.2': {
'd20': 1,
'd21': 3,
},
})
def test_add(self):
st1 = object_replicator.Stats(
attempted=1, failure=2, hashmatch=3, remove=4, rsync=5,
success=6, suffix_count=7, suffix_hash=8, suffix_sync=9,
failure_nodes={
'10.1.1.1': {'sda': 10, 'sdb': 20},
'10.1.1.2': {'sda': 10, 'sdb': 20}})
st2 = object_replicator.Stats(
attempted=2, failure=4, hashmatch=6, remove=8, rsync=10,
success=12, suffix_count=14, suffix_hash=16, suffix_sync=18,
failure_nodes={
'10.1.1.2': {'sda': 10, 'sdb': 20},
'10.1.1.3': {'sda': 10, 'sdb': 20}})
total = st1 + st2
self.assertEqual(total.attempted, 3)
self.assertEqual(total.failure, 6)
self.assertEqual(total.hashmatch, 9)
self.assertEqual(total.remove, 12)
self.assertEqual(total.rsync, 15)
self.assertEqual(total.success, 18)
self.assertEqual(total.suffix_count, 21)
self.assertEqual(total.suffix_hash, 24)
self.assertEqual(total.suffix_sync, 27)
self.assertEqual(total.failure_nodes, {
'10.1.1.1': {'sda': 10, 'sdb': 20},
'10.1.1.2': {'sda': 20, 'sdb': 40},
'10.1.1.3': {'sda': 10, 'sdb': 20},
})
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/obj/test_replicator.py |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import shutil
import tempfile
import unittest
import eventlet
import mock
import six
from swift.common import bufferedhttp
from swift.common import exceptions
from swift.common import swob
from swift.common.storage_policy import POLICIES
from swift.common import utils
from swift.common.swob import HTTPException
from swift.obj import diskfile
from swift.obj import server
from swift.obj import ssync_receiver, ssync_sender
from swift.obj.reconstructor import ObjectReconstructor
from test import listen_zero, unit
from test.debug_logger import debug_logger
from test.unit import (patch_policies, make_timestamp_iter, mock_check_drive,
skip_if_no_xattrs)
from test.unit.obj.common import write_diskfile
if six.PY2:
UNPACK_ERR = b":ERROR: 0 'need more than 1 value to unpack'"
else:
UNPACK_ERR = b":ERROR: 0 'not enough values to unpack (expected 2, got 1)'"
@unit.patch_policies()
class TestReceiver(unittest.TestCase):
def setUp(self):
skip_if_no_xattrs()
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b'startcap'
        # Not sure why the test.unit stuff isn't taking effect here, so I'm
        # reinforcing it.
self.testdir = os.path.join(
tempfile.mkdtemp(), 'tmp_test_ssync_receiver')
utils.mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
self.conf = {
'devices': self.testdir,
'mount_check': 'false',
'replication_concurrency_per_device': '0',
'log_requests': 'false'}
utils.mkdirs(os.path.join(self.testdir, 'device', 'partition'))
self.logger = debug_logger()
self.controller = server.ObjectController(
self.conf, logger=self.logger)
self.controller.bytes_per_sync = 1
self.account1 = 'a'
self.container1 = 'c'
self.object1 = 'o1'
self.name1 = '/' + '/'.join((
self.account1, self.container1, self.object1))
self.hash1 = utils.hash_path(
self.account1, self.container1, self.object1)
self.ts1 = '1372800001.00000'
self.metadata1 = {
'name': self.name1,
'X-Timestamp': self.ts1,
'Content-Length': '0'}
self.account2 = 'a'
self.container2 = 'c'
self.object2 = 'o2'
self.name2 = '/' + '/'.join((
self.account2, self.container2, self.object2))
self.hash2 = utils.hash_path(
self.account2, self.container2, self.object2)
self.ts2 = '1372800002.00000'
self.metadata2 = {
'name': self.name2,
'X-Timestamp': self.ts2,
'Content-Length': '0'}
def tearDown(self):
shutil.rmtree(os.path.dirname(self.testdir))
def body_lines(self, body):
lines = []
for line in body.split(b'\n'):
line = line.strip()
if line:
lines.append(line)
return lines
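    # The SSYNC bodies used throughout these tests mirror the two-phase
    # framing the sender puts on the wire (an illustrative sketch, not an
    # exhaustive grammar):
    #
    #   :MISSING_CHECK: START\r\n
    #   <hash> <ts_data> [hints, e.g. m:<hex>, durable:no]\r\n
    #   :MISSING_CHECK: END\r\n
    #   :UPDATES: START\r\n
    #   <subrequests: PUT/DELETE line, headers, blank line, body>\r\n
    #   :UPDATES: END\r\n
    #
    # The receiver streams back its own :MISSING_CHECK:/:UPDATES: delimited
    # response, which these tests compare via body_lines() above.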
def test_SSYNC_semaphore_locked(self):
with mock.patch.object(
self.controller, 'replication_semaphore') as \
mocked_replication_semaphore:
self.controller.logger = mock.MagicMock()
mocked_replication_semaphore.acquire.return_value = False
req = swob.Request.blank(
'/device/partition', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
if six.PY2:
last_line = (
b":ERROR: 503 '<html><h1>Service Unavailable</h1><p>The "
b"server is currently unavailable. Please try again at a "
b"later time.</p></html>'")
else:
last_line = (
b":ERROR: 503 b'<html><h1>Service Unavailable</h1><p>The "
b"server is currently unavailable. Please try again at a "
b"later time.</p></html>'")
self.assertEqual(
self.body_lines(resp.body),
[last_line])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_SSYNC_calls_replication_lock(self):
with mock.patch.object(
self.controller._diskfile_router[POLICIES.legacy],
'replication_lock') as mocked_replication_lock:
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
mocked_replication_lock.assert_called_once_with('sda1',
POLICIES.legacy,
'1')
def test_Receiver_with_default_storage_policy(self):
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
rcvr = ssync_receiver.Receiver(self.controller, req)
body_lines = [chunk.strip() for chunk in rcvr() if chunk.strip()]
self.assertEqual(
body_lines,
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(rcvr.policy, POLICIES[0])
def test_Receiver_with_storage_policy_index_header(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
rcvr = ssync_receiver.Receiver(self.controller, req)
body_lines = [chunk.strip() for chunk in rcvr() if chunk.strip()]
self.assertEqual(
body_lines,
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(rcvr.policy, POLICIES[1])
self.assertIsNone(rcvr.frag_index)
def test_Receiver_with_bad_storage_policy_index_header(self):
valid_indices = sorted([int(policy) for policy in POLICIES])
bad_index = valid_indices[-1] + 1
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '0',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': bad_index},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
self.controller.logger = mock.MagicMock()
with self.assertRaises(HTTPException) as caught:
ssync_receiver.Receiver(self.controller, req)
self.assertEqual('503 Service Unavailable', caught.exception.status)
self.assertEqual(b'No policy with index 2', caught.exception.body)
@unit.patch_policies()
def test_Receiver_with_only_frag_index_header(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '7',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
rcvr = ssync_receiver.Receiver(self.controller, req)
body_lines = [chunk.strip() for chunk in rcvr() if chunk.strip()]
self.assertEqual(
body_lines,
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(rcvr.policy, POLICIES[1])
self.assertEqual(rcvr.frag_index, 7)
@unit.patch_policies()
def test_Receiver_with_only_node_index_header(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_SSYNC_NODE_INDEX': '7',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
rcvr = ssync_receiver.Receiver(self.controller, req)
body_lines = [chunk.strip() for chunk in rcvr() if chunk.strip()]
self.assertEqual(
body_lines,
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(rcvr.policy, POLICIES[1])
        # we used to require the reconstructor to send the frag_index twice,
        # as two different headers, for evolutionary reasons; now we ignore
        # node_index
self.assertEqual(rcvr.frag_index, None)
@unit.patch_policies()
def test_Receiver_with_matched_indexes(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_SSYNC_NODE_INDEX': '7',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '7',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
rcvr = ssync_receiver.Receiver(self.controller, req)
body_lines = [chunk.strip() for chunk in rcvr() if chunk.strip()]
self.assertEqual(
body_lines,
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(rcvr.policy, POLICIES[1])
self.assertEqual(rcvr.frag_index, 7)
@unit.patch_policies()
def test_Receiver_with_invalid_indexes(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_SSYNC_NODE_INDEX': 'None',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': 'None',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
@unit.patch_policies()
def test_Receiver_with_mismatched_indexes(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_SSYNC_NODE_INDEX': '6',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '7',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
rcvr = ssync_receiver.Receiver(self.controller, req)
body_lines = [chunk.strip() for chunk in rcvr() if chunk.strip()]
self.assertEqual(
body_lines,
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(rcvr.policy, POLICIES[1])
        # node_index, if provided, should always match frag_index; but if they
        # differ, frag_index takes precedence
self.assertEqual(rcvr.frag_index, 7)
def test_SSYNC_replication_lock_fail(self):
def _mock(path, policy, partition):
with exceptions.ReplicationLockTimeout(0.01, '/somewhere/' + path):
eventlet.sleep(0.05)
with mock.patch.object(
self.controller._diskfile_router[POLICIES.legacy],
'replication_lock', _mock):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b":ERROR: 0 '0.01 seconds: /somewhere/sda1'"])
self.controller.logger.debug.assert_called_once_with(
'None/sda1/1 SSYNC LOCK TIMEOUT: 0.01 seconds: '
'/somewhere/sda1')
def test_SSYNC_replication_lock_per_partition(self):
def _concurrent_ssync(path1, path2):
env = {'REQUEST_METHOD': 'SSYNC'}
body = ':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n' \
':UPDATES: START\r\n:UPDATES: END\r\n'
req1 = swob.Request.blank(path1, environ=env, body=body)
req2 = swob.Request.blank(path2, environ=env, body=body)
rcvr1 = ssync_receiver.Receiver(self.controller, req1)
rcvr2 = ssync_receiver.Receiver(self.controller, req2)
body_lines1 = []
body_lines2 = []
for chunk1, chunk2 in six.moves.zip_longest(rcvr1(), rcvr2()):
if chunk1 and chunk1.strip():
body_lines1.append(chunk1.strip())
if chunk2 and chunk2.strip():
body_lines2.append(chunk2.strip())
return body_lines1, body_lines2
self.controller._diskfile_router[POLICIES[0]]\
.replication_lock_timeout = 0.01
self.controller._diskfile_router[POLICIES[0]]\
.replication_concurrency_per_device = 2
# It should be possible to lock two different partitions
body_lines1, body_lines2 = _concurrent_ssync('/sda1/1', '/sda1/2')
self.assertEqual(
body_lines1,
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(
body_lines2,
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
# It should not be possible to lock the same partition twice
body_lines1, body_lines2 = _concurrent_ssync('/sda1/1', '/sda1/1')
self.assertEqual(
body_lines1,
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertRegex(
b''.join(body_lines2),
br"^:ERROR: 0 '0\.0[0-9]+ seconds: "
br"/.+/sda1/objects/1/.lock-replication'$")
def test_SSYNC_initial_path(self):
with mock.patch.object(
self.controller, 'replication_semaphore') as \
mocked_replication_semaphore:
req = swob.Request.blank(
'/device', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b"Invalid path: /device"])
self.assertEqual(resp.status_int, 400)
self.assertFalse(mocked_replication_semaphore.acquire.called)
self.assertFalse(mocked_replication_semaphore.release.called)
with mock.patch.object(
self.controller, 'replication_semaphore') as \
mocked_replication_semaphore:
req = swob.Request.blank(
'/device/', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b"Invalid path: /device/"])
self.assertEqual(resp.status_int, 400)
self.assertFalse(mocked_replication_semaphore.acquire.called)
self.assertFalse(mocked_replication_semaphore.release.called)
with mock.patch.object(
self.controller, 'replication_semaphore') as \
mocked_replication_semaphore:
req = swob.Request.blank(
'/device/partition', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(resp.body, b'\r\n')
self.assertEqual(resp.status_int, 200)
mocked_replication_semaphore.acquire.assert_called_once_with(0)
mocked_replication_semaphore.release.assert_called_once_with()
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(['ssync client disconnected'], error_lines)
with mock.patch.object(
self.controller, 'replication_semaphore') as \
mocked_replication_semaphore:
req = swob.Request.blank(
'/device/partition/junk',
environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b"Invalid path: /device/partition/junk"])
self.assertEqual(resp.status_int, 400)
self.assertFalse(mocked_replication_semaphore.acquire.called)
self.assertFalse(mocked_replication_semaphore.release.called)
def test_SSYNC_mount_check_isdir(self):
with mock.patch.object(self.controller, 'replication_semaphore'), \
mock.patch.object(
self.controller._diskfile_router[POLICIES.legacy],
'mount_check', False), \
mock_check_drive(isdir=True) as mocks:
req = swob.Request.blank(
'/device/partition', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(resp.body, b'\r\n')
self.assertEqual(resp.status_int, 200)
self.assertEqual([], mocks['ismount'].call_args_list)
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(['ssync client disconnected'], error_lines)
def test_SSYNC_mount_check(self):
with mock.patch.object(self.controller, 'replication_semaphore'), \
mock.patch.object(
self.controller._diskfile_router[POLICIES.legacy],
'mount_check', True), \
mock_check_drive(ismount=False) as mocks:
req = swob.Request.blank(
'/device/partition', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b"<html><h1>Insufficient Storage</h1><p>There "
b"was not enough space to save the resource. Drive: "
b"device</p></html>"])
self.assertEqual(resp.status_int, 507)
self.assertEqual([mock.call(os.path.join(
self.controller._diskfile_router[POLICIES.legacy].devices,
'device'))], mocks['ismount'].call_args_list)
mocks['ismount'].reset_mock()
mocks['ismount'].return_value = True
req = swob.Request.blank(
'/device/partition', environ={'REQUEST_METHOD': 'SSYNC'})
resp = req.get_response(self.controller)
self.assertEqual(resp.body, b'\r\n')
self.assertEqual(resp.status_int, 200)
self.assertEqual([mock.call(os.path.join(
self.controller._diskfile_router[POLICIES.legacy].devices,
'device'))] * 2, mocks['ismount'].call_args_list)
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(['ssync client disconnected'], error_lines)
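    # Several of the tests below wrap the request body in a small
    # io.BytesIO subclass (_Wrapper) exposing a mock socket through
    # get_socket(); that is just enough of eventlet's wsgi input for the
    # tests to assert that the receiver tears the connection down
    # (greenio.shutdown_safe plus socket close) on exceptions and timeouts
    # mid-exchange.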
def test_SSYNC_Exception(self):
class _Wrapper(io.BytesIO):
def __init__(self, value):
io.BytesIO.__init__(self, value)
self.mock_socket = mock.MagicMock()
def get_socket(self):
return self.mock_socket
with mock.patch.object(
ssync_receiver.eventlet.greenio, 'shutdown_safe') as \
mock_shutdown_safe:
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\nBad content is here')
req.remote_addr = '1.2.3.4'
mock_wsgi_input = _Wrapper(req.body)
req.environ['wsgi.input'] = mock_wsgi_input
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b":ERROR: 0 'Got no headers for Bad content is here'"])
self.assertEqual(resp.status_int, 200)
mock_shutdown_safe.assert_called_once_with(
mock_wsgi_input.mock_socket)
mock_wsgi_input.mock_socket.close.assert_called_once_with()
self.controller.logger.exception.assert_called_once_with(
'1.2.3.4/device/partition EXCEPTION in ssync.Receiver')
def test_SSYNC_Exception_Exception(self):
class _Wrapper(io.BytesIO):
def __init__(self, value):
io.BytesIO.__init__(self, value)
self.mock_socket = mock.MagicMock()
def get_socket(self):
return self.mock_socket
with mock.patch.object(
ssync_receiver.eventlet.greenio, 'shutdown_safe') as \
mock_shutdown_safe:
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\nBad content is here')
req.remote_addr = mock.MagicMock()
req.remote_addr.__str__ = mock.Mock(
side_effect=Exception("can't stringify this"))
mock_wsgi_input = _Wrapper(req.body)
req.environ['wsgi.input'] = mock_wsgi_input
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END'])
self.assertEqual(resp.status_int, 200)
mock_shutdown_safe.assert_called_once_with(
mock_wsgi_input.mock_socket)
mock_wsgi_input.mock_socket.close.assert_called_once_with()
self.controller.logger.exception.assert_called_once_with(
'EXCEPTION in ssync.Receiver')
def test_MISSING_CHECK_timeout(self):
class _Wrapper(io.BytesIO):
def __init__(self, value):
io.BytesIO.__init__(self, value)
self.mock_socket = mock.MagicMock()
def readline(self, sizehint=-1):
line = io.BytesIO.readline(self)
if line.startswith(b'hash'):
eventlet.sleep(0.1)
return line
def get_socket(self):
return self.mock_socket
self.controller.client_timeout = 0.01
with mock.patch.object(
ssync_receiver.eventlet.greenio, 'shutdown_safe') as \
mock_shutdown_safe:
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
'hash ts\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
req.remote_addr = '2.3.4.5'
mock_wsgi_input = _Wrapper(req.body)
req.environ['wsgi.input'] = mock_wsgi_input
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b":ERROR: 408 '0.01 seconds: missing_check line'"])
self.assertEqual(resp.status_int, 200)
self.assertTrue(mock_shutdown_safe.called)
self.controller.logger.error.assert_called_once_with(
'2.3.4.5/sda1/1 TIMEOUT in ssync.Receiver: '
'0.01 seconds: missing_check line')
def test_MISSING_CHECK_other_exception(self):
class _Wrapper(io.BytesIO):
def __init__(self, value):
io.BytesIO.__init__(self, value)
self.mock_socket = mock.MagicMock()
def readline(self, sizehint=-1):
line = io.BytesIO.readline(self)
if line.startswith(b'hash'):
raise Exception('test exception')
return line
def get_socket(self):
return self.mock_socket
self.controller.client_timeout = 0.01
with mock.patch.object(
ssync_receiver.eventlet.greenio, 'shutdown_safe') as \
mock_shutdown_safe:
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
'hash ts\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
req.remote_addr = '3.4.5.6'
mock_wsgi_input = _Wrapper(req.body)
req.environ['wsgi.input'] = mock_wsgi_input
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b":ERROR: 0 'test exception'"])
self.assertEqual(resp.status_int, 200)
self.assertTrue(mock_shutdown_safe.called)
self.controller.logger.exception.assert_called_once_with(
'3.4.5.6/sda1/1 EXCEPTION in ssync.Receiver')
def test_MISSING_CHECK_empty_list(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_MISSING_CHECK_have_none(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + '\r\n' +
self.hash2 + ' ' + self.ts2 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START',
(self.hash1 + ' dm').encode('ascii'),
(self.hash2 + ' dm').encode('ascii'),
b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
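    # In the receiver's missing_check responses asserted here, each echoed
    # line is '<hash> <flags>': as exercised by these tests, 'd' means the
    # object's data is wanted, 'm' means only newer metadata (.meta) is
    # wanted, 'dm' means both, and an object the receiver already has in
    # sync is not echoed back at all.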
def test_MISSING_CHECK_extra_line_parts(self):
# check that rx tolerates extra parts in missing check lines to
# allow for protocol upgrades
extra_1 = 'extra'
extra_2 = 'multiple extra parts'
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + ' ' + extra_1 + '\r\n' +
self.hash2 + ' ' + self.ts2 + ' ' + extra_2 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START',
(self.hash1 + ' dm').encode('ascii'),
(self.hash2 + ' dm').encode('ascii'),
b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_MISSING_CHECK_have_one_exact(self):
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
fp = open(os.path.join(object_dir, self.ts1 + '.data'), 'w+')
fp.write('1')
fp.flush()
self.metadata1['Content-Length'] = '1'
diskfile.write_metadata(fp, self.metadata1)
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + '\r\n' +
self.hash2 + ' ' + self.ts2 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START',
(self.hash2 + ' dm').encode('ascii'),
b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_MISSING_CHECK_missing_meta_expired_data(self):
# verify that even when rx disk file has expired x-delete-at, it will
# still be opened and checked for missing meta
self.controller.logger = mock.MagicMock()
ts1 = next(make_timestamp_iter())
df = self.controller.get_diskfile(
'sda1', '1', self.account1, self.container1, self.object1,
POLICIES[0])
write_diskfile(df, ts1, extra_metadata={'X-Delete-At': 0})
# make a request - expect newer metadata to be wanted
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + ts1.internal + ' m:30d40\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START',
b'c2519f265f9633e74f9b2fe3b9bec27d m',
b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
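    # The 'm:30d40' hint sent above (and again in
    # test_MISSING_CHECK_have_older_meta below) advertises that the sender
    # also holds a .meta file; the hex value appears to encode the .meta
    # timestamp as an offset from the data timestamp at Swift's 1e-5 second
    # resolution (0x30d40 == 200000, i.e. 2.00000 seconds), which lines up
    # with the sender's .meta being two seconds newer than its .data in the
    # have_older_meta test.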
@patch_policies(with_ec_default=True)
def test_MISSING_CHECK_missing_durable(self):
# check that local non-durable frag is made durable if remote sends
# same ts for same frag, but only if remote is durable
self.controller.logger = mock.MagicMock()
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
# make rx disk file but don't commit it, so durable state is missing
ts1 = next(make_timestamp_iter()).internal
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
fp = open(os.path.join(object_dir, ts1 + '#2.data'), 'w+')
fp.write('1')
fp.flush()
metadata1 = {
'name': self.name1,
'X-Timestamp': ts1,
'Content-Length': '1'}
diskfile.write_metadata(fp, metadata1)
self.assertEqual([ts1 + '#2.data'], os.listdir(object_dir)) # sanity
# offer same non-durable frag - expect no data to be wanted
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '2'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + ts1 + ' durable:no\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START',
b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
# the local frag is still not durable...
self.assertEqual([ts1 + '#2.data'], os.listdir(object_dir))
# offer same frag but durable - expect no data to be wanted
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '2'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + ts1 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START',
b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
# the local frag is now durable...
self.assertEqual([ts1 + '#2#d.data'], os.listdir(object_dir))
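    # EC on-disk naming exercised by the surrounding tests: a fragment
    # archive is stored as <timestamp>#<frag_index>.data while non-durable
    # and becomes <timestamp>#<frag_index>#d.data once durable, hence the
    # listdir() results flipping from '...#2.data' to '...#2#d.data' after
    # the receiver sees a durable offer at the same timestamp.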
@patch_policies(with_ec_default=True)
@mock.patch('swift.obj.diskfile.ECDiskFileWriter.commit')
def test_MISSING_CHECK_missing_durable_but_commit_fails(self, mock_commit):
self.controller.logger = mock.MagicMock()
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
# make rx disk file but don't commit it, so durable state is missing
ts1 = next(make_timestamp_iter()).internal
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
fp = open(os.path.join(object_dir, ts1 + '#2.data'), 'w+')
fp.write('1')
fp.flush()
metadata1 = {
'name': self.name1,
'X-Timestamp': ts1,
'Content-Length': '1'}
diskfile.write_metadata(fp, metadata1)
self.assertEqual([ts1 + '#2.data'], os.listdir(object_dir)) # sanity
# make a request with commit disabled - expect data to be wanted
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '2'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + ts1 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START',
(self.hash1 + ' dm').encode('ascii'),
b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
# make a request with commit raising error - expect data to be wanted
mock_commit.side_effect = Exception
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '2'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + ts1 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START',
(self.hash1 + ' dm').encode('ascii'),
b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertTrue(self.controller.logger.exception.called)
self.assertIn(
'EXCEPTION in ssync.Receiver while attempting commit of',
self.controller.logger.exception.call_args[0][0])
@patch_policies(with_ec_default=True)
def test_MISSING_CHECK_local_non_durable(self):
# check that local non-durable fragment does not prevent other frags
# being wanted from the sender
self.controller.logger = mock.MagicMock()
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
ts_iter = make_timestamp_iter()
ts1 = next(ts_iter).internal
ts2 = next(ts_iter).internal
ts3 = next(ts_iter).internal
# make non-durable rx disk file at ts2
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
fp = open(os.path.join(object_dir, ts2 + '#2.data'), 'w+')
fp.write('1')
fp.flush()
metadata1 = {
'name': self.name1,
'X-Timestamp': ts2,
'Content-Length': '1'}
diskfile.write_metadata(fp, metadata1)
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir)) # sanity
def do_check(tx_missing_line, expected_rx_missing_lines):
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '2'},
body=':MISSING_CHECK: START\r\n' +
tx_missing_line + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START'] +
[l.encode('ascii') for l in expected_rx_missing_lines] +
[b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
# check remote frag is always wanted - older, newer, durable or not...
do_check(self.hash1 + ' ' + ts1 + ' durable:no',
[self.hash1 + ' dm'])
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir))
do_check(self.hash1 + ' ' + ts1 + ' durable:yes',
[self.hash1 + ' dm'])
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir))
do_check(self.hash1 + ' ' + ts1, [self.hash1 + ' dm'])
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir))
do_check(self.hash1 + ' ' + ts3 + ' durable:no',
[self.hash1 + ' dm'])
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir))
do_check(self.hash1 + ' ' + ts3 + ' durable:yes',
[self.hash1 + ' dm'])
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir))
do_check(self.hash1 + ' ' + ts3, [self.hash1 + ' dm'])
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir))
# ... except when at same timestamp
do_check(self.hash1 + ' ' + ts2 + ' durable:no', [])
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir))
        # durable remote frag at ts2 will make the local durable...
do_check(self.hash1 + ' ' + ts2 + ' durable:yes', [])
self.assertEqual([ts2 + '#2#d.data'], os.listdir(object_dir))
@patch_policies(with_ec_default=True)
def test_MISSING_CHECK_local_durable(self):
# check that local durable fragment does not prevent newer non-durable
# frags being wanted from the sender
self.controller.logger = mock.MagicMock()
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
ts_iter = make_timestamp_iter()
ts1 = next(ts_iter).internal
ts2 = next(ts_iter).internal
ts3 = next(ts_iter).internal
# make non-durable rx disk file at ts2
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
fp = open(os.path.join(object_dir, ts2 + '#2.data'), 'w+')
fp.write('1')
fp.flush()
metadata1 = {
'name': self.name1,
'X-Timestamp': ts2,
'Content-Length': '1'}
diskfile.write_metadata(fp, metadata1)
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir)) # sanity
def do_check(tx_missing_line, expected_rx_missing_lines):
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '2'},
body=':MISSING_CHECK: START\r\n' +
tx_missing_line + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START'] +
[l.encode('ascii') for l in expected_rx_missing_lines] +
[b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
# check remote frag is always wanted - older, newer, durable or not...
do_check(self.hash1 + ' ' + ts1 + ' durable:no',
[self.hash1 + ' dm'])
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir))
do_check(self.hash1 + ' ' + ts1 + ' durable:yes',
[self.hash1 + ' dm'])
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir))
do_check(self.hash1 + ' ' + ts1, [self.hash1 + ' dm'])
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir))
do_check(self.hash1 + ' ' + ts3 + ' durable:no',
[self.hash1 + ' dm'])
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir))
do_check(self.hash1 + ' ' + ts3 + ' durable:yes',
[self.hash1 + ' dm'])
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir))
do_check(self.hash1 + ' ' + ts3, [self.hash1 + ' dm'])
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir))
# ... except when at same timestamp
do_check(self.hash1 + ' ' + ts2 + ' durable:no', [])
self.assertEqual([ts2 + '#2.data'], os.listdir(object_dir))
        # durable remote frag at ts2 will make the local durable...
do_check(self.hash1 + ' ' + ts2 + ' durable:yes', [])
self.assertEqual([ts2 + '#2#d.data'], os.listdir(object_dir))
@patch_policies(with_ec_default=True)
def test_MISSING_CHECK_local_durable_older_than_remote_non_durable(self):
# check that newer non-durable fragment is wanted
self.controller.logger = mock.MagicMock()
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
ts_iter = make_timestamp_iter()
ts1 = next(ts_iter).internal
ts2 = next(ts_iter).internal
        # make durable rx disk file at ts1
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
fp = open(os.path.join(object_dir, ts1 + '#2#d.data'), 'w+')
fp.write('1')
fp.flush()
metadata1 = {
'name': self.name1,
'X-Timestamp': ts1,
'Content-Length': '1'}
diskfile.write_metadata(fp, metadata1)
# make a request offering non-durable at ts2
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '2'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + ts2 + ' durable:no\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START',
(self.hash1 + ' dm').encode('ascii'),
b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_MISSING_CHECK_storage_policy(self):
# update router post policy patch
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[1])),
'1', self.hash1)
utils.mkdirs(object_dir)
fp = open(os.path.join(object_dir, self.ts1 + '.data'), 'w+')
fp.write('1')
fp.flush()
self.metadata1['Content-Length'] = '1'
diskfile.write_metadata(fp, self.metadata1)
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + '\r\n' +
self.hash2 + ' ' + self.ts2 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START',
(self.hash2 + ' dm').encode('ascii'),
b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_MISSING_CHECK_have_one_newer(self):
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
newer_ts1 = utils.normalize_timestamp(float(self.ts1) + 1)
self.metadata1['X-Timestamp'] = newer_ts1
fp = open(os.path.join(object_dir, newer_ts1 + '.data'), 'w+')
fp.write('1')
fp.flush()
self.metadata1['Content-Length'] = '1'
diskfile.write_metadata(fp, self.metadata1)
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + '\r\n' +
self.hash2 + ' ' + self.ts2 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START',
(self.hash2 + ' dm').encode('ascii'),
b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_MISSING_CHECK_have_newer_meta(self):
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
older_ts1 = utils.normalize_timestamp(float(self.ts1) - 1)
self.metadata1['X-Timestamp'] = older_ts1
fp = open(os.path.join(object_dir, older_ts1 + '.data'), 'w+')
fp.write('1')
fp.flush()
self.metadata1['Content-Length'] = '1'
diskfile.write_metadata(fp, self.metadata1)
# write newer .meta file
metadata = {'name': self.name1, 'X-Timestamp': self.ts2,
'X-Object-Meta-Test': 'test'}
fp = open(os.path.join(object_dir, self.ts2 + '.meta'), 'w+')
diskfile.write_metadata(fp, metadata)
# receiver has .data at older_ts, .meta at ts2
# sender has .data at ts1
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + self.ts1 + '\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START',
(self.hash1 + ' d').encode('ascii'),
b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
def test_MISSING_CHECK_have_older_meta(self):
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1',
diskfile.get_data_dir(POLICIES[0])),
'1', self.hash1)
utils.mkdirs(object_dir)
older_ts1 = utils.normalize_timestamp(float(self.ts1) - 1)
self.metadata1['X-Timestamp'] = older_ts1
fp = open(os.path.join(object_dir, older_ts1 + '.data'), 'w+')
fp.write('1')
fp.flush()
self.metadata1['Content-Length'] = '1'
diskfile.write_metadata(fp, self.metadata1)
# write .meta file at ts1
metadata = {'name': self.name1, 'X-Timestamp': self.ts1,
'X-Object-Meta-Test': 'test'}
fp = open(os.path.join(object_dir, self.ts1 + '.meta'), 'w+')
diskfile.write_metadata(fp, metadata)
# receiver has .data at older_ts, .meta at ts1
# sender has .data at older_ts, .meta at ts2
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/sda1/1',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n' +
self.hash1 + ' ' + older_ts1 + ' m:30d40\r\n'
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START',
(self.hash1 + ' m').encode('ascii'),
b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.error.called)
self.assertFalse(self.controller.logger.exception.called)
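    # The :UPDATES: section exercised below carries one subrequest per
    # object: a request line ('PUT /a/c/o' or 'DELETE /a/c/o'), 'Header:
    # value' lines, a blank line, then exactly Content-Length bytes of body
    # for a PUT (a DELETE must not carry a content-length). An illustrative
    # PUT as these tests would frame it:
    #
    #   PUT /a/c/o\r\n
    #   Content-Length: 1\r\n
    #   X-Timestamp: 1364456113.76334\r\n
    #   \r\n
    #   X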
def test_UPDATES_no_start(self):
        # verify behavior when the sender disconnects and does not send
        # ':UPDATES: START', e.g. if a sender timeout pops while waiting for
        # the receiver's response to missing checks
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n')
req.remote_addr = '2.3.4.5'
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END'])
self.assertEqual(resp.status_int, 200)
self.controller.logger.error.assert_called_once_with(
'ssync client disconnected')
def test_UPDATES_timeout(self):
class _Wrapper(io.BytesIO):
def __init__(self, value):
io.BytesIO.__init__(self, value)
self.mock_socket = mock.MagicMock()
def readline(self, sizehint=-1):
line = io.BytesIO.readline(self)
if line.startswith(b'DELETE'):
eventlet.sleep(0.1)
return line
def get_socket(self):
return self.mock_socket
self.controller.client_timeout = 0.01
with mock.patch.object(
ssync_receiver.eventlet.greenio, 'shutdown_safe') as \
mock_shutdown_safe:
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'X-Timestamp: 1364456113.76334\r\n'
'\r\n'
':UPDATES: END\r\n')
req.remote_addr = '2.3.4.5'
mock_wsgi_input = _Wrapper(req.body)
req.environ['wsgi.input'] = mock_wsgi_input
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b":ERROR: 408 '0.01 seconds: updates line'"])
self.assertEqual(resp.status_int, 200)
mock_shutdown_safe.assert_called_once_with(
mock_wsgi_input.mock_socket)
mock_wsgi_input.mock_socket.close.assert_called_once_with()
self.controller.logger.error.assert_called_once_with(
'2.3.4.5/device/partition TIMEOUT in ssync.Receiver: '
'0.01 seconds: updates line')
def test_UPDATES_other_exception(self):
class _Wrapper(io.BytesIO):
def __init__(self, value):
io.BytesIO.__init__(self, value)
self.mock_socket = mock.MagicMock()
def readline(self, sizehint=-1):
line = io.BytesIO.readline(self)
if line.startswith(b'DELETE'):
raise Exception('test exception')
return line
def get_socket(self):
return self.mock_socket
self.controller.client_timeout = 0.01
with mock.patch.object(
ssync_receiver.eventlet.greenio, 'shutdown_safe') as \
mock_shutdown_safe:
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'X-Timestamp: 1364456113.76334\r\n'
'\r\n'
':UPDATES: END\r\n')
req.remote_addr = '3.4.5.6'
mock_wsgi_input = _Wrapper(req.body)
req.environ['wsgi.input'] = mock_wsgi_input
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b":ERROR: 0 'test exception'"])
self.assertEqual(resp.status_int, 200)
mock_shutdown_safe.assert_called_once_with(
mock_wsgi_input.mock_socket)
mock_wsgi_input.mock_socket.close.assert_called_once_with()
self.controller.logger.exception.assert_called_once_with(
'3.4.5.6/device/partition EXCEPTION in ssync.Receiver')
def test_UPDATES_no_problems_no_hard_disconnect(self):
class _Wrapper(io.BytesIO):
def __init__(self, value):
io.BytesIO.__init__(self, value)
self.mock_socket = mock.MagicMock()
def get_socket(self):
return self.mock_socket
self.controller.client_timeout = 0.01
with mock.patch.object(ssync_receiver.eventlet.greenio,
'shutdown_safe') as mock_shutdown_safe, \
mock.patch.object(
self.controller, 'DELETE',
return_value=swob.HTTPNoContent()):
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'X-Timestamp: 1364456113.76334\r\n'
'\r\n'
':UPDATES: END\r\n')
mock_wsgi_input = _Wrapper(req.body)
req.environ['wsgi.input'] = mock_wsgi_input
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(mock_shutdown_safe.called)
self.assertFalse(mock_wsgi_input.mock_socket.close.called)
def test_UPDATES_bad_subrequest_line(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'bad_subrequest_line\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
UNPACK_ERR])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
with mock.patch.object(
self.controller, 'DELETE',
return_value=swob.HTTPNoContent()):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'X-Timestamp: 1364456113.76334\r\n'
'\r\n'
'bad_subrequest_line2')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
UNPACK_ERR])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
def test_UPDATES_no_headers(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b":ERROR: 0 'Got no headers for DELETE /a/c/o'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
def test_UPDATES_bad_headers(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'Bad-Header Test\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
UNPACK_ERR])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'Good-Header: Test\r\n'
'Bad-Header Test\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
UNPACK_ERR])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
def test_UPDATES_bad_content_length(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o\r\n'
'Content-Length: a\r\n\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':ERROR: 0 "invalid literal for int() with base 10: \'a\'"'])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
def test_UPDATES_content_length_with_DELETE(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'Content-Length: 1\r\n\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b":ERROR: 0 'DELETE subrequest with content-length /a/c/o'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
def test_UPDATES_no_content_length_with_PUT(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o\r\n\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b":ERROR: 0 'No content-length sent for PUT /a/c/o'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
def test_UPDATES_early_termination(self):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o\r\n'
'Content-Length: 1\r\n\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END'])
self.assertEqual(resp.status_int, 200)
self.controller.logger.error.assert_called_once_with(
'None/device/partition read failed in ssync.Receiver: '
'Early termination for PUT /a/c/o')
def test_UPDATES_failures(self):
@server.public
def _DELETE(request):
if request.path == '/device/partition/a/c/works':
return swob.HTTPNoContent()
else:
return swob.HTTPInternalServerError()
# failures never hit threshold
with mock.patch.object(self.controller, 'DELETE', _DELETE):
self.controller.replication_failure_threshold = 4
self.controller.replication_failure_ratio = 1.5
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n')
resp = req.get_response(self.controller)
if six.PY2:
final_line = (b":ERROR: 500 'ERROR: With :UPDATES: "
b"3 failures to 0 successes'")
else:
final_line = (b":ERROR: 500 b'ERROR: With :UPDATES: "
b"3 failures to 0 successes'")
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END', final_line])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertTrue(self.controller.logger.warning.called)
self.assertEqual(3, self.controller.logger.warning.call_count)
self.controller.logger.clear()
# failures hit threshold and no successes, so ratio is like infinity
with mock.patch.object(self.controller, 'DELETE', _DELETE):
self.controller.replication_failure_threshold = 4
self.controller.replication_failure_ratio = 1.5
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
':UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b":ERROR: 0 'Too many 4 failures to 0 successes'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
self.assertFalse(self.controller.logger.error.called)
self.assertTrue(self.controller.logger.warning.called)
self.assertEqual(4, self.controller.logger.warning.call_count)
self.controller.logger.clear()
# failures hit threshold and ratio hits 1.33333333333
with mock.patch.object(self.controller, 'DELETE', _DELETE):
self.controller.replication_failure_threshold = 4
self.controller.replication_failure_ratio = 1.5
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/works\r\n\r\n'
'DELETE /a/c/works\r\n\r\n'
'DELETE /a/c/works\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
':UPDATES: END\r\n')
resp = req.get_response(self.controller)
if six.PY2:
final_line = (b":ERROR: 500 'ERROR: With :UPDATES: "
b"4 failures to 3 successes'")
else:
final_line = (b":ERROR: 500 b'ERROR: With :UPDATES: "
b"4 failures to 3 successes'")
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
final_line])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertTrue(self.controller.logger.warning.called)
self.assertEqual(4, self.controller.logger.warning.call_count)
self.controller.logger.clear()
# failures hit threshold and ratio hits 2.0
with mock.patch.object(self.controller, 'DELETE', _DELETE):
self.controller.replication_failure_threshold = 4
self.controller.replication_failure_ratio = 1.5
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/works\r\n\r\n'
'DELETE /a/c/works\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
'DELETE /a/c/o\r\n\r\n'
':UPDATES: END\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b":ERROR: 0 'Too many 4 failures to 2 successes'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
self.assertFalse(self.controller.logger.error.called)
self.assertTrue(self.controller.logger.warning.called)
self.assertEqual(4, self.controller.logger.warning.call_count)
self.controller.logger.clear()
def test_UPDATES_PUT(self):
_PUT_request = [None]
@server.public
def _PUT(request):
_PUT_request[0] = request
request.read_body = request.environ['wsgi.input'].read()
return swob.HTTPCreated()
with mock.patch.object(self.controller, 'PUT', _PUT):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=b':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
b':UPDATES: START\r\n'
b'PUT /a/c/o\r\n'
b'Content-Length: 1\r\n'
b'Etag: c4ca4238a0b923820dcc509a6f75849b\r\n'
b'X-Timestamp: 1364456113.12344\r\n'
b'X-Object-Meta-Test1: one\r\n'
b'X-Object-Meta-T\xc3\xa8st2: m\xc3\xa8ta\r\n'
b'Content-Encoding: gzip\r\n'
b'Specialty-Header: value\r\n'
b'X-Backend-No-Commit: True\r\n'
b'\r\n'
b'1')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertEqual(len(_PUT_request), 1) # sanity
req = _PUT_request[0]
self.assertEqual(req.path, '/device/partition/a/c/o')
self.assertEqual(req.content_length, 1)
expected = {
'Etag': 'c4ca4238a0b923820dcc509a6f75849b',
'Content-Length': '1',
'X-Timestamp': '1364456113.12344',
'X-Object-Meta-Test1': 'one',
'X-Object-Meta-T\xc3\xa8st2': 'm\xc3\xa8ta',
'Content-Encoding': 'gzip',
'Specialty-Header': 'value',
'X-Backend-No-Commit': 'True',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
# note: Etag and X-Backend-No-Commit not in replication-headers
'X-Backend-Replication-Headers': (
'content-length x-timestamp x-object-meta-test1 '
'x-object-meta-t\xc3\xa8st2 content-encoding '
'specialty-header')}
self.assertEqual({k: req.headers[k] for k in expected}, expected)
def test_UPDATES_PUT_replication_headers(self):
self.controller.logger = mock.MagicMock()
# sanity check - regular PUT will not persist Specialty-Header or
# X-Backend-No-Commit
req = swob.Request.blank(
'/sda1/0/a/c/o1', body='1',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '1',
'Content-Type': 'text/plain',
'Etag': 'c4ca4238a0b923820dcc509a6f75849b',
'X-Timestamp': '1364456113.12344',
'X-Object-Meta-Test1': 'one',
'Content-Encoding': 'gzip',
'X-Backend-No-Commit': 'False',
'Specialty-Header': 'value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
df = self.controller.get_diskfile(
'sda1', '0', 'a', 'c', 'o1', POLICIES.default)
df.open()
self.assertFalse('Specialty-Header' in df.get_metadata())
self.assertFalse('X-Backend-No-Commit' in df.get_metadata())
# an SSYNC request can override PUT header filtering...
req = swob.Request.blank(
'/sda1/0',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o2\r\n'
'Content-Length: 1\r\n'
'Content-Type: text/plain\r\n'
'Etag: c4ca4238a0b923820dcc509a6f75849b\r\n'
'X-Timestamp: 1364456113.12344\r\n'
'X-Object-Meta-Test1: one\r\n'
'Content-Encoding: gzip\r\n'
'X-Backend-No-Commit: False\r\n'
'Specialty-Header: value\r\n'
'\r\n'
'1')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
# verify diskfile has metadata permitted by replication headers
# including Specialty-Header, but not Etag or X-Backend-No-Commit
df = self.controller.get_diskfile(
'sda1', '0', 'a', 'c', 'o2', POLICIES.default)
df.open()
for chunk in df.reader():
self.assertEqual(b'1', chunk)
expected = {'ETag': 'c4ca4238a0b923820dcc509a6f75849b',
'Content-Length': '1',
'Content-Type': 'text/plain',
'X-Timestamp': '1364456113.12344',
'X-Object-Meta-Test1': 'one',
'Content-Encoding': 'gzip',
'Specialty-Header': 'value',
'name': '/a/c/o2'}
actual = df.get_metadata()
self.assertEqual(expected, actual)
def test_UPDATES_POST(self):
_POST_request = [None]
@server.public
def _POST(request):
_POST_request[0] = request
return swob.HTTPAccepted()
with mock.patch.object(self.controller, 'POST', _POST):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'POST /a/c/o\r\n'
'X-Timestamp: 1364456113.12344\r\n'
'X-Object-Meta-Test1: one\r\n'
'Specialty-Header: value\r\n\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
req = _POST_request[0]
self.assertEqual(req.path, '/device/partition/a/c/o')
self.assertIsNone(req.content_length)
self.assertEqual(req.headers, {
'X-Timestamp': '1364456113.12344',
'X-Object-Meta-Test1': 'one',
'Specialty-Header': 'value',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'x-timestamp x-object-meta-test1 specialty-header')})
def test_UPDATES_with_storage_policy(self):
        # rebuild the diskfile router so it picks up the patched policies
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
_PUT_request = [None]
@server.public
def _PUT(request):
_PUT_request[0] = request
request.read_body = request.environ['wsgi.input'].read()
return swob.HTTPCreated()
with mock.patch.object(self.controller, 'PUT', _PUT):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o\r\n'
'Content-Length: 1\r\n'
'X-Timestamp: 1364456113.12344\r\n'
'X-Object-Meta-Test1: one\r\n'
'Content-Encoding: gzip\r\n'
'Specialty-Header: value\r\n'
'\r\n'
'1')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertEqual(len(_PUT_request), 1) # sanity
req = _PUT_request[0]
self.assertEqual(req.path, '/device/partition/a/c/o')
self.assertEqual(req.content_length, 1)
self.assertEqual(req.headers, {
'Content-Length': '1',
'X-Timestamp': '1364456113.12344',
'X-Object-Meta-Test1': 'one',
'Content-Encoding': 'gzip',
'Specialty-Header': 'value',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '1',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp x-object-meta-test1 '
'content-encoding specialty-header')})
self.assertEqual(req.read_body, b'1')
def test_UPDATES_PUT_with_storage_policy_and_node_index(self):
        # rebuild the diskfile router so it picks up the patched policies
self.controller._diskfile_router = diskfile.DiskFileRouter(
self.conf, self.controller.logger)
_PUT_request = [None]
@server.public
def _PUT(request):
_PUT_request[0] = request
request.read_body = request.environ['wsgi.input'].read()
return swob.HTTPCreated()
with mock.patch.object(self.controller, 'PUT', _PUT):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC',
'HTTP_X_BACKEND_SSYNC_NODE_INDEX': '7',
'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '7',
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o\r\n'
'Content-Length: 1\r\n'
'X-Timestamp: 1364456113.12344\r\n'
'X-Object-Meta-Test1: one\r\n'
'Content-Encoding: gzip\r\n'
'Specialty-Header: value\r\n'
'\r\n'
'1')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertEqual(len(_PUT_request), 1) # sanity
req = _PUT_request[0]
self.assertEqual(req.path, '/device/partition/a/c/o')
self.assertEqual(req.content_length, 1)
self.assertEqual(req.headers, {
'Content-Length': '1',
'X-Timestamp': '1364456113.12344',
'X-Object-Meta-Test1': 'one',
'Content-Encoding': 'gzip',
'Specialty-Header': 'value',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Ssync-Frag-Index': '7',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp x-object-meta-test1 '
'content-encoding specialty-header')})
self.assertEqual(req.read_body, b'1')
def test_UPDATES_DELETE(self):
_DELETE_request = [None]
@server.public
def _DELETE(request):
_DELETE_request[0] = request
return swob.HTTPNoContent()
with mock.patch.object(self.controller, 'DELETE', _DELETE):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'DELETE /a/c/o\r\n'
'X-Timestamp: 1364456113.76334\r\n'
'\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertEqual(len(_DELETE_request), 1) # sanity
req = _DELETE_request[0]
self.assertEqual(req.path, '/device/partition/a/c/o')
self.assertEqual(req.headers, {
'X-Timestamp': '1364456113.76334',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': 'x-timestamp'})
def test_UPDATES_BONK(self):
_BONK_request = [None]
@server.public
def _BONK(request):
_BONK_request[0] = request
return swob.HTTPOk()
self.controller.BONK = _BONK
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'BONK /a/c/o\r\n'
'X-Timestamp: 1364456113.76334\r\n'
'\r\n')
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b":ERROR: 0 'Invalid subrequest method BONK'"])
self.assertEqual(resp.status_int, 200)
self.controller.logger.exception.assert_called_once_with(
'None/device/partition EXCEPTION in ssync.Receiver')
self.assertEqual(len(_BONK_request), 1) # sanity
self.assertIsNone(_BONK_request[0])
def test_UPDATES_multiple(self):
_requests = []
@server.public
def _PUT(request):
_requests.append(request)
request.read_body = request.environ['wsgi.input'].read()
return swob.HTTPCreated()
@server.public
def _POST(request):
_requests.append(request)
return swob.HTTPOk()
@server.public
def _DELETE(request):
_requests.append(request)
return swob.HTTPNoContent()
with mock.patch.object(self.controller, 'PUT', _PUT), \
mock.patch.object(self.controller, 'POST', _POST), \
mock.patch.object(self.controller, 'DELETE', _DELETE):
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o1\r\n'
'Content-Length: 1\r\n'
'X-Timestamp: 1364456113.00001\r\n'
'X-Object-Meta-Test1: one\r\n'
'Content-Encoding: gzip\r\n'
'Specialty-Header: value\r\n'
'\r\n'
'1'
'DELETE /a/c/o2\r\n'
'X-Timestamp: 1364456113.00002\r\n'
'\r\n'
'PUT /a/c/o3\r\n'
'Content-Length: 3\r\n'
'X-Timestamp: 1364456113.00003\r\n'
'\r\n'
'123'
'PUT /a/c/o4\r\n'
'Content-Length: 4\r\n'
'X-Timestamp: 1364456113.00004\r\n'
'\r\n'
'1\r\n4'
'DELETE /a/c/o5\r\n'
'X-Timestamp: 1364456113.00005\r\n'
'\r\n'
'DELETE /a/c/o6\r\n'
'X-Timestamp: 1364456113.00006\r\n'
'\r\n'
'PUT /a/c/o7\r\n'
'Content-Length: 7\r\n'
'X-Timestamp: 1364456113.00007\r\n'
'\r\n'
'1234567'
'POST /a/c/o7\r\n'
'X-Object-Meta-Test-User: user_meta\r\n'
'X-Timestamp: 1364456113.00008\r\n'
'\r\n'
)
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END',
b':UPDATES: START', b':UPDATES: END'])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertEqual(len(_requests), 8) # sanity
req = _requests.pop(0)
self.assertEqual(req.method, 'PUT')
self.assertEqual(req.path, '/device/partition/a/c/o1')
self.assertEqual(req.content_length, 1)
self.assertEqual(req.headers, {
'Content-Length': '1',
'X-Timestamp': '1364456113.00001',
'X-Object-Meta-Test1': 'one',
'Content-Encoding': 'gzip',
'Specialty-Header': 'value',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp x-object-meta-test1 '
'content-encoding specialty-header')})
self.assertEqual(req.read_body, b'1')
req = _requests.pop(0)
self.assertEqual(req.method, 'DELETE')
self.assertEqual(req.path, '/device/partition/a/c/o2')
self.assertEqual(req.headers, {
'X-Timestamp': '1364456113.00002',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': 'x-timestamp'})
req = _requests.pop(0)
self.assertEqual(req.method, 'PUT')
self.assertEqual(req.path, '/device/partition/a/c/o3')
self.assertEqual(req.content_length, 3)
self.assertEqual(req.headers, {
'Content-Length': '3',
'X-Timestamp': '1364456113.00003',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp')})
self.assertEqual(req.read_body, b'123')
req = _requests.pop(0)
self.assertEqual(req.method, 'PUT')
self.assertEqual(req.path, '/device/partition/a/c/o4')
self.assertEqual(req.content_length, 4)
self.assertEqual(req.headers, {
'Content-Length': '4',
'X-Timestamp': '1364456113.00004',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp')})
self.assertEqual(req.read_body, b'1\r\n4')
req = _requests.pop(0)
self.assertEqual(req.method, 'DELETE')
self.assertEqual(req.path, '/device/partition/a/c/o5')
self.assertEqual(req.headers, {
'X-Timestamp': '1364456113.00005',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': 'x-timestamp'})
req = _requests.pop(0)
self.assertEqual(req.method, 'DELETE')
self.assertEqual(req.path, '/device/partition/a/c/o6')
self.assertEqual(req.headers, {
'X-Timestamp': '1364456113.00006',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': 'x-timestamp'})
req = _requests.pop(0)
self.assertEqual(req.method, 'PUT')
self.assertEqual(req.path, '/device/partition/a/c/o7')
self.assertEqual(req.content_length, 7)
self.assertEqual(req.headers, {
'Content-Length': '7',
'X-Timestamp': '1364456113.00007',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp')})
self.assertEqual(req.read_body, b'1234567')
req = _requests.pop(0)
self.assertEqual(req.method, 'POST')
self.assertEqual(req.path, '/device/partition/a/c/o7')
self.assertIsNone(req.content_length)
self.assertEqual(req.headers, {
'X-Timestamp': '1364456113.00008',
'X-Object-Meta-Test-User': 'user_meta',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'x-object-meta-test-user x-timestamp')})
self.assertEqual(_requests, [])
def test_UPDATES_subreq_does_not_read_all(self):
        # This tests that if an SSYNC subrequest fails without reading all of
        # the subrequest body, the receiver reads and throws away the rest of
        # that body before moving on to the next subrequest.
        # If you comment out the part in ssync_receiver where it does:
        #     for junk in subreq.environ['wsgi.input']:
        #         pass
        # you can then see this test fail.
_requests = []
@server.public
def _PUT(request):
_requests.append(request)
            # Deliberately read only up to the first 2 bytes.
request.read_body = request.environ['wsgi.input'].read(2)
return swob.HTTPInternalServerError()
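        # BytesIO.readline() honours a size hint, which would truncate lines
        # when the receiver passes its (tiny) network_chunk_size set below;
        # this subclass drops the hint so complete subrequest lines are read.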
class _IgnoreReadlineHint(io.BytesIO):
def __init__(self, value):
io.BytesIO.__init__(self, value)
def readline(self, hint=-1):
return io.BytesIO.readline(self)
self.controller.PUT = _PUT
self.controller.network_chunk_size = 2
self.controller.logger = mock.MagicMock()
req = swob.Request.blank(
'/device/partition',
environ={'REQUEST_METHOD': 'SSYNC'},
body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n'
':UPDATES: START\r\n'
'PUT /a/c/o1\r\n'
'Content-Length: 3\r\n'
'X-Timestamp: 1364456113.00001\r\n'
'\r\n'
'123'
'PUT /a/c/o2\r\n'
'Content-Length: 1\r\n'
'X-Timestamp: 1364456113.00002\r\n'
'\r\n'
'1')
req.environ['wsgi.input'] = _IgnoreReadlineHint(req.body)
resp = req.get_response(self.controller)
if six.PY2:
final_line = (b":ERROR: 500 'ERROR: With :UPDATES: "
b"2 failures to 0 successes'")
else:
final_line = (b":ERROR: 500 b'ERROR: With :UPDATES: "
b"2 failures to 0 successes'")
self.assertEqual(
self.body_lines(resp.body),
[b':MISSING_CHECK: START', b':MISSING_CHECK: END', final_line])
self.assertEqual(resp.status_int, 200)
self.assertFalse(self.controller.logger.exception.called)
self.assertFalse(self.controller.logger.error.called)
self.assertTrue(self.controller.logger.warning.called)
self.assertEqual(2, self.controller.logger.warning.call_count)
self.assertEqual(len(_requests), 2) # sanity
req = _requests.pop(0)
self.assertEqual(req.path, '/device/partition/a/c/o1')
self.assertEqual(req.content_length, 3)
self.assertEqual(req.headers, {
'Content-Length': '3',
'X-Timestamp': '1364456113.00001',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp')})
self.assertEqual(req.read_body, b'12')
req = _requests.pop(0)
self.assertEqual(req.path, '/device/partition/a/c/o2')
self.assertEqual(req.content_length, 1)
self.assertEqual(req.headers, {
'Content-Length': '1',
'X-Timestamp': '1364456113.00002',
'Host': 'localhost:80',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Replication': 'True',
'X-Backend-Replication-Headers': (
'content-length x-timestamp')})
self.assertEqual(req.read_body, b'1')
self.assertEqual(_requests, [])
@patch_policies(with_ec_default=True)
class TestSsyncRxServer(unittest.TestCase):
# Tests to verify behavior of SSYNC requests sent to an object
# server socket.
def setUp(self):
skip_if_no_xattrs()
# dirs
self.tmpdir = tempfile.mkdtemp()
self.tempdir = os.path.join(self.tmpdir, 'tmp_test_obj_server')
self.devices = os.path.join(self.tempdir, 'srv/node')
for device in ('sda1', 'sdb1'):
os.makedirs(os.path.join(self.devices, device))
self.conf = {
'devices': self.devices,
'mount_check': 'false',
'swift_dir': self.tempdir,
}
self.rx_logger = debug_logger('test-object-server')
rx_server = server.ObjectController(self.conf, logger=self.rx_logger)
self.rx_ip = '127.0.0.1'
self.sock = listen_zero()
self.rx_server = eventlet.spawn(
eventlet.wsgi.server, self.sock, rx_server, utils.NullLogger())
self.rx_port = self.sock.getsockname()[1]
self.tx_logger = debug_logger('test-reconstructor')
self.daemon = ObjectReconstructor(self.conf, self.tx_logger)
self.daemon._diskfile_mgr = self.daemon._df_router[POLICIES[0]]
def tearDown(self):
self.rx_server.kill()
self.sock.close()
eventlet.sleep(0)
shutil.rmtree(self.tmpdir)
def test_SSYNC_disconnect(self):
node = {
'replication_ip': '127.0.0.1',
'replication_port': self.rx_port,
'device': 'sdb1',
}
job = {
'partition': 0,
'policy': POLICIES[0],
'device': 'sdb1',
}
sender = ssync_sender.Sender(self.daemon, node, job, ['abc'])
# kick off the sender and let the error trigger failure
with mock.patch(
'swift.obj.ssync_receiver.Receiver.initialize_request') \
as mock_initialize_request:
mock_initialize_request.side_effect = \
swob.HTTPInternalServerError()
success, _ = sender()
self.assertFalse(success)
stderr = six.StringIO()
with mock.patch('sys.stderr', stderr):
# let gc and eventlet spin a bit
del sender
for i in range(3):
eventlet.sleep(0)
self.assertNotIn('ValueError: invalid literal for int() with base 16',
stderr.getvalue())
def test_SSYNC_device_not_available(self):
with mock.patch('swift.obj.ssync_receiver.Receiver.missing_check')\
as mock_missing_check:
self.connection = bufferedhttp.BufferedHTTPConnection(
'127.0.0.1:%s' % self.rx_port)
self.connection.putrequest('SSYNC', '/sdc1/0')
self.connection.putheader('Transfer-Encoding', 'chunked')
self.connection.putheader('X-Backend-Storage-Policy-Index',
int(POLICIES[0]))
self.connection.endheaders()
resp = self.connection.getresponse()
self.assertEqual(507, resp.status)
resp.read()
resp.close()
# sanity check that the receiver did not proceed to missing_check
self.assertFalse(mock_missing_check.called)
def test_SSYNC_read_error(self):
# verify that read errors from wsgi reader are caught and reported
def do_send(data):
self.rx_logger.clear()
self.connection = bufferedhttp.BufferedHTTPConnection(
'127.0.0.1:%s' % self.rx_port)
self.connection.putrequest('SSYNC', '/sda1/0')
self.connection.putheader('Transfer-Encoding', 'chunked')
self.connection.putheader('X-Backend-Storage-Policy-Index',
int(POLICIES[0]))
self.connection.endheaders()
resp = self.connection.getresponse()
self.assertEqual(200, resp.status)
resp.close()
self.connection.send(data)
self.connection.close()
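            # the receiver handles the request in another greenthread, so
            # poll briefly for its error log lines to appear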
for sleep_time in (0, 0.1, 1):
lines = self.rx_logger.get_lines_for_level('error')
if lines:
return lines
eventlet.sleep(sleep_time)
return []
# check read errors during missing_check phase
error_lines = do_send(b'')
self.assertEqual(1, len(error_lines))
self.assertIn('missing_check start: invalid literal', error_lines[0])
error_lines = do_send(b'1\r\n')
self.assertEqual(1, len(error_lines))
self.assertIn('missing_check start: unexpected end of file',
error_lines[0])
error_lines = do_send(b'17\r\n:MISSING_CHECK: START\r\n\r\nx\r\n')
self.assertEqual(1, len(error_lines))
self.assertIn('missing_check line: invalid literal', error_lines[0])
error_lines = do_send(b'17\r\n:MISSING_CHECK: START\r\n\r\n12\r\n')
self.assertEqual(1, len(error_lines))
self.assertIn('missing_check line: unexpected end of file',
error_lines[0])
# check read errors during updates phase
with mock.patch('swift.obj.ssync_receiver.Receiver.missing_check'):
error_lines = do_send(b'')
self.assertEqual(1, len(error_lines))
self.assertIn('updates start: invalid literal', error_lines[0])
with mock.patch('swift.obj.ssync_receiver.Receiver.missing_check'):
error_lines = do_send(b'1\r\n')
self.assertEqual(1, len(error_lines))
self.assertIn('updates start: unexpected end of file', error_lines[0])
with mock.patch('swift.obj.ssync_receiver.Receiver.missing_check'):
error_lines = do_send(b'11\r\n:UPDATES: START\r\n\r\nx\r\n')
self.assertEqual(1, len(error_lines))
self.assertIn('updates line: invalid literal', error_lines[0])
with mock.patch('swift.obj.ssync_receiver.Receiver.missing_check'):
error_lines = do_send(b'11\r\n:UPDATES: START\r\n\r\n12\r\n')
self.assertEqual(1, len(error_lines))
self.assertIn('updates line: unexpected end of file', error_lines[0])
def test_SSYNC_invalid_policy(self):
valid_indices = sorted([int(policy) for policy in POLICIES])
bad_index = valid_indices[-1] + 1
with mock.patch('swift.obj.ssync_receiver.Receiver.missing_check')\
as mock_missing_check:
self.connection = bufferedhttp.BufferedHTTPConnection(
'127.0.0.1:%s' % self.rx_port)
self.connection.putrequest('SSYNC', '/sda1/0')
self.connection.putheader('Transfer-Encoding', 'chunked')
self.connection.putheader('X-Backend-Storage-Policy-Index',
bad_index)
self.connection.endheaders()
resp = self.connection.getresponse()
self.assertEqual(503, resp.status)
resp.read()
resp.close()
# sanity check that the receiver did not proceed to missing_check
self.assertFalse(mock_missing_check.called)
def test_bad_request_invalid_frag_index(self):
with mock.patch('swift.obj.ssync_receiver.Receiver.missing_check')\
as mock_missing_check:
self.connection = bufferedhttp.BufferedHTTPConnection(
'127.0.0.1:%s' % self.rx_port)
self.connection.putrequest('SSYNC', '/sda1/0')
self.connection.putheader('Transfer-Encoding', 'chunked')
self.connection.putheader('X-Backend-Ssync-Frag-Index',
'None')
self.connection.endheaders()
resp = self.connection.getresponse()
self.assertEqual(400, resp.status)
error_msg = resp.read()
self.assertIn(b"Invalid X-Backend-Ssync-Frag-Index 'None'", error_msg)
resp.close()
# sanity check that the receiver did not proceed to missing_check
self.assertFalse(mock_missing_check.called)
class TestModuleMethods(unittest.TestCase):
def test_decode_missing(self):
object_hash = '9d41d8cd98f00b204e9800998ecf0abc'
ts_iter = make_timestamp_iter()
t_data = next(ts_iter)
t_meta = next(ts_iter)
t_ctype = next(ts_iter)
d_meta_data = t_meta.raw - t_data.raw
d_ctype_data = t_ctype.raw - t_data.raw
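        # on the wire, meta and content-type timestamps are sent as hex
        # deltas relative to the data timestamp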
# legacy single timestamp string
msg = '%s %s' % (object_hash, t_data.internal)
expected = dict(object_hash=object_hash,
ts_meta=t_data,
ts_data=t_data,
ts_ctype=t_data,
durable=True)
self.assertEqual(expected,
ssync_receiver.decode_missing(msg.encode('ascii')))
# hex meta delta encoded as extra message part
msg = '%s %s m:%x' % (object_hash, t_data.internal, d_meta_data)
expected = dict(object_hash=object_hash,
ts_data=t_data,
ts_meta=t_meta,
ts_ctype=t_data,
durable=True)
self.assertEqual(expected,
ssync_receiver.decode_missing(msg.encode('ascii')))
# hex content type delta encoded in extra message part
msg = '%s %s t:%x,m:%x' % (object_hash, t_data.internal,
d_ctype_data, d_meta_data)
expected = dict(object_hash=object_hash,
ts_data=t_data,
ts_meta=t_meta,
ts_ctype=t_ctype,
durable=True)
self.assertEqual(
expected, ssync_receiver.decode_missing(msg.encode('ascii')))
# order of subparts does not matter
msg = '%s %s m:%x,t:%x' % (object_hash, t_data.internal,
d_meta_data, d_ctype_data)
self.assertEqual(
expected, ssync_receiver.decode_missing(msg.encode('ascii')))
# timestamps have offsets
t_data_offset = utils.Timestamp(t_data, offset=99)
t_meta_offset = utils.Timestamp(t_meta, offset=1)
t_ctype_offset = utils.Timestamp(t_ctype, offset=2)
expected = dict(object_hash=object_hash,
ts_data=t_data_offset,
ts_meta=t_meta_offset,
ts_ctype=t_ctype_offset,
durable=True)
        expected_msg = ('%s %s_0000000000000063 m:%x__1,t:%x__2'
                        % (object_hash, t_data.internal, d_meta_data,
                           d_ctype_data))
        self.assertEqual(
            expected_msg.encode('ascii'),
            ssync_sender.encode_missing(
                object_hash, t_data_offset, t_meta_offset, t_ctype_offset,
                durable=True))
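        # the encoded message should decode back to the expected dict above
        self.assertEqual(
            expected,
            ssync_receiver.decode_missing(expected_msg.encode('ascii')))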
# hex content type delta may be zero
msg = '%s %s t:0,m:%x' % (object_hash, t_data.internal, d_meta_data)
expected = dict(object_hash=object_hash,
ts_data=t_data,
ts_meta=t_meta,
ts_ctype=t_data,
durable=True)
self.assertEqual(
expected, ssync_receiver.decode_missing(msg.encode('ascii')))
# unexpected zero delta is tolerated
msg = '%s %s m:0' % (object_hash, t_data.internal)
expected = dict(object_hash=object_hash,
ts_meta=t_data,
ts_data=t_data,
ts_ctype=t_data,
durable=True)
self.assertEqual(expected,
ssync_receiver.decode_missing(msg.encode('ascii')))
# unexpected subparts in timestamp delta part are tolerated
msg = '%s %s c:12345,m:%x,junk' % (object_hash,
t_data.internal,
d_meta_data)
expected = dict(object_hash=object_hash,
ts_meta=t_meta,
ts_data=t_data,
ts_ctype=t_data,
durable=True)
self.assertEqual(
expected, ssync_receiver.decode_missing(msg.encode('ascii')))
# extra message parts tolerated
msg = '%s %s m:%x future parts' % (object_hash,
t_data.internal,
d_meta_data)
expected = dict(object_hash=object_hash,
ts_meta=t_meta,
ts_data=t_data,
ts_ctype=t_data,
durable=True)
self.assertEqual(expected,
ssync_receiver.decode_missing(msg.encode('ascii')))
# not durable
def check_non_durable(durable_val):
msg = '%s %s m:%x,durable:%s' % (object_hash,
t_data.internal,
d_meta_data,
durable_val)
expected = dict(object_hash=object_hash,
ts_meta=t_meta,
ts_data=t_data,
ts_ctype=t_data,
durable=False)
self.assertEqual(
expected, ssync_receiver.decode_missing(msg.encode('ascii')))
check_non_durable('no')
check_non_durable('false')
check_non_durable('False')
# explicit durable (as opposed to True by default)
def check_durable(durable_val):
msg = '%s %s m:%x,durable:%s' % (object_hash,
t_data.internal,
d_meta_data,
durable_val)
expected = dict(object_hash=object_hash,
ts_meta=t_meta,
ts_data=t_data,
ts_ctype=t_data,
durable=True)
self.assertEqual(
expected, ssync_receiver.decode_missing(msg.encode('ascii')))
check_durable('yes')
check_durable('true')
check_durable('True')
def test_encode_wanted(self):
ts_iter = make_timestamp_iter()
old_t_data = next(ts_iter)
t_data = next(ts_iter)
old_t_meta = next(ts_iter)
t_meta = next(ts_iter)
remote = {
'object_hash': 'theremotehash',
'ts_data': t_data,
'ts_meta': t_meta,
}
# missing
local = {}
expected = 'theremotehash dm'
self.assertEqual(ssync_receiver.encode_wanted(remote, local),
expected)
# in-sync
local = {
'ts_data': t_data,
'ts_meta': t_meta,
}
expected = None
self.assertEqual(ssync_receiver.encode_wanted(remote, local),
expected)
# out-of-sync
local = {
'ts_data': old_t_data,
'ts_meta': old_t_meta,
}
expected = 'theremotehash dm'
self.assertEqual(ssync_receiver.encode_wanted(remote, local),
expected)
# old data
local = {
'ts_data': old_t_data,
'ts_meta': t_meta,
}
expected = 'theremotehash d'
self.assertEqual(ssync_receiver.encode_wanted(remote, local),
expected)
# old metadata
local = {
'ts_data': t_data,
'ts_meta': old_t_meta,
}
expected = 'theremotehash m'
self.assertEqual(ssync_receiver.encode_wanted(remote, local),
expected)
# in-sync tombstone
local = {
'ts_data': t_data,
}
expected = None
self.assertEqual(ssync_receiver.encode_wanted(remote, local),
expected)
# old tombstone
local = {
'ts_data': old_t_data,
}
expected = 'theremotehash d'
self.assertEqual(ssync_receiver.encode_wanted(remote, local),
expected)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/obj/test_ssync_receiver.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import unittest
import os
import mock
import six
import six.moves.cPickle as pickle
import tempfile
import time
import shutil
import re
import random
import struct
import collections
from eventlet import Timeout, sleep, spawn
from eventlet.green import threading
from contextlib import closing, contextmanager
from gzip import GzipFile
from shutil import rmtree
from six.moves.urllib.parse import unquote
from swift.common import utils
from swift.common.exceptions import DiskFileError, DiskFileQuarantined
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import dump_recon_cache, md5, Timestamp, mkdirs
from swift.obj import diskfile, reconstructor as object_reconstructor
from swift.common import ring
from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy,
POLICIES, EC_POLICY)
from swift.obj.reconstructor import SYNC, REVERT
from test import annotate_failure
from test.debug_logger import debug_logger
from test.unit import (patch_policies, mocked_http_conn, FabricatedRing,
make_timestamp_iter, DEFAULT_TEST_EC_TYPE,
encode_frag_archive_bodies, quiet_eventlet_exceptions,
skip_if_no_xattrs)
from test.unit.obj.common import write_diskfile
class FakeSsyncSender(object):
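    # Stand-in for ssync_sender.Sender: records each call's arguments in the
    # optional ssync_calls list and, when invoked, returns either the
    # response_callback result or a default (True, {}).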
def __init__(self, daemon, node, job, suffixes, ssync_calls=None,
response_callback=None, **kwargs):
if ssync_calls is not None:
call_args = {'node': node, 'job': job, 'suffixes': suffixes}
call_args.update(kwargs)
ssync_calls.append(call_args)
self.response_callback = response_callback
self.node = node
self.job = job
self.suffixes = suffixes
self.limited_by_max_objects = False
def __call__(self):
if self.response_callback:
response = self.response_callback(
self.node, self.job, self.suffixes)
else:
response = True, {}
return response
@contextmanager
def mock_ssync_sender(ssync_calls=None, response_callback=None, **kwargs):
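    # Patch swift.obj.reconstructor.ssync_sender so that jobs are "synced" by
    # FakeSsyncSender instances for the duration of the with block.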
def fake_ssync(daemon, node, job, suffixes, **kwargs):
return FakeSsyncSender(daemon, node, job, suffixes, ssync_calls,
response_callback, **kwargs)
with mock.patch('swift.obj.reconstructor.ssync_sender', fake_ssync):
yield fake_ssync
def make_ec_archive_bodies(policy, test_body):
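    # EC-encode the test body segment by segment and return one complete
    # fragment archive body per node.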
segment_size = policy.ec_segment_size
# split up the body into buffers
chunks = [test_body[x:x + segment_size]
for x in range(0, len(test_body), segment_size)]
# encode the buffers into fragment payloads
fragment_payloads = []
for chunk in chunks:
fragments = \
policy.pyeclib_driver.encode(chunk) * policy.ec_duplication_factor
if not fragments:
break
fragment_payloads.append(fragments)
# join up the fragment payloads per node
ec_archive_bodies = [''.join(frags) for frags in zip(*fragment_payloads)]
return ec_archive_bodies
def _create_test_rings(path, next_part_power=None):
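    # Write pickled RingData for the 'object' and 'object-1' rings: three
    # replicas spread over four single-device nodes.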
testgz = os.path.join(path, 'object.ring.gz')
intended_replica2part2dev_id = [
[0, 1, 2],
[1, 2, 3],
[2, 3, 0]
]
intended_devs = [
{'id': 0, 'device': 'sda1', 'zone': 0, 'ip': '127.0.0.0',
'port': 6200},
{'id': 1, 'device': 'sda1', 'zone': 1, 'ip': '127.0.0.1',
'port': 6200},
{'id': 2, 'device': 'sda1', 'zone': 2, 'ip': '127.0.0.2',
'port': 6200},
{'id': 3, 'device': 'sda1', 'zone': 4, 'ip': '127.0.0.3',
'port': 6200}
]
intended_part_shift = 30
with closing(GzipFile(testgz, 'wb')) as f:
pickle.dump(
ring.RingData(intended_replica2part2dev_id,
intended_devs, intended_part_shift,
next_part_power),
f)
testgz = os.path.join(path, 'object-1.ring.gz')
with closing(GzipFile(testgz, 'wb')) as f:
pickle.dump(
ring.RingData(intended_replica2part2dev_id,
intended_devs, intended_part_shift,
next_part_power),
f)
def count_stats(logger, key, metric):
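    # Count the recorded fake-statsd calls of the given kind whose metric
    # name matches the regex.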
count = 0
for record in logger.statsd_client.calls[key]:
stat_args, stat_kwargs = record
m = stat_args[0]
if re.match(metric, m):
count += 1
return count
def get_header_frag_index(self, body):
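    # Read the fragment index out of a pyeclib fragment header and return it
    # as the X-Object-Sysmeta-Ec-Frag-Index metadata; shared by the test
    # classes, hence the explicit self (self.policy supplies the driver).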
metadata = self.policy.pyeclib_driver.get_metadata(body)
frag_index = struct.unpack('h', metadata[:2])[0]
return {
'X-Object-Sysmeta-Ec-Frag-Index': frag_index,
}
@patch_policies([StoragePolicy(0, name='zero', is_default=True),
ECStoragePolicy(1, name='one',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=3, ec_nparity=2),
ECStoragePolicy(2, name='two',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=3, ec_nparity=2)])
class TestGlobalSetupObjectReconstructor(unittest.TestCase):
# Tests for reconstructor using real objects in test partition directories.
legacy_durable = False
def setUp(self):
skip_if_no_xattrs()
self.testdir = tempfile.mkdtemp()
POLICIES[0].object_ring = FabricatedRing(3)
POLICIES[1].object_ring = FabricatedRing(5)
POLICIES[2].object_ring = FabricatedRing(5)
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b''
self.devices = os.path.join(self.testdir, 'node')
os.makedirs(self.devices)
os.mkdir(os.path.join(self.devices, 'sda1'))
self.objects = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(POLICIES[0]))
self.objects_1 = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(POLICIES[1]))
os.mkdir(self.objects)
os.mkdir(self.objects_1)
self.parts = {}
self.parts_1 = {}
self.part_nums = ['0', '1', '2']
for part in self.part_nums:
self.parts[part] = os.path.join(self.objects, part)
os.mkdir(self.parts[part])
self.parts_1[part] = os.path.join(self.objects_1, part)
os.mkdir(self.parts_1[part])
self.conf = dict(
swift_dir=self.testdir, devices=self.devices, mount_check='false',
timeout='300', stats_interval='1',
bind_ip='10.0.0.1', bind_port=6200)
self.logger = debug_logger('test-reconstructor')
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.policy = POLICIES[1]
# most of the reconstructor test methods require that there be
# real objects in place, not just part dirs, so we'll create them
# all here....
# part 0: 3C1/hash/xxx#1#d.data <-- job: sync_only - partners (FI 1)
# 061/hash/xxx#1#d.data <-- included in earlier job (FI 1)
# /xxx#2#d.data <-- job: sync_revert to index 2
# part_nodes: ['sda0', 'sda1', 'sda2', 'sda3', 'sda4']
# part 1: 3C1/hash/xxx#0#d.data <-- job: sync_revert to index 0
# /xxx#1#d.data <-- job: sync_revert to index 1
# 061/hash/xxx#1#d.data <-- included in earlier job (FI 1)
# part_nodes: ['sda5', 'sda6', 'sda7', 'sda0', 'sda1']
# part 2: 3C1/hash/xxx#2#d.data <-- job: sync_revert to index 2
# 061/hash/xxx#0#d.data <-- job: sync_revert to index 0
# part_nodes: ['sda2', 'sda3', 'sda4', 'sda5', 'sda6']
def _create_frag_archives(policy, obj_path, local_id, obj_set):
# we'll create 2 sets of objects in different suffix dirs
# so we cover all the scenarios we want (3 of them)
# 1) part dir with all FI's matching the local node index
# 2) part dir with one local and mix of others
# 3) part dir with no local FI and one or more others
def part_0(set):
if set == 0:
# just the local
return local_id
else:
# one local and all of another
if obj_num == 0:
return local_id
else:
return (local_id + 1) % 3
def part_1(set):
if set == 0:
# one local and all of another
if obj_num == 0:
return local_id
else:
return (local_id + 2) % 3
else:
# just the local node
return local_id
def part_2(set):
# this part is a handoff in our config (always)
                # so let's do a set with indices from different nodes
if set == 0:
return (local_id + 1) % 3
else:
return (local_id + 2) % 3
            # function dictionary for defining test scenarios based on set #
scenarios = {'0': part_0,
'1': part_1,
'2': part_2}
for part_num in self.part_nums:
# create 3 unique objects per part, each part
# will then have a unique mix of FIs for the
# possible scenarios
for obj_num in range(0, 3):
self._create_diskfile(
part=part_num, object_name='o' + str(obj_set),
policy=policy, frag_index=scenarios[part_num](obj_set),
timestamp=utils.Timestamp(t))
ips = utils.whataremyips(self.reconstructor.ring_ip)
for policy in [p for p in POLICIES if p.policy_type == EC_POLICY]:
self.ec_policy = policy
self.ec_obj_ring = self.reconstructor.load_object_ring(
self.ec_policy)
data_dir = diskfile.get_data_dir(self.ec_policy)
for local_dev in [dev for dev in self.ec_obj_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] ==
self.reconstructor.port]:
self.ec_local_dev = local_dev
dev_path = os.path.join(self.reconstructor.devices_dir,
self.ec_local_dev['device'])
self.ec_obj_path = os.path.join(dev_path, data_dir)
# create a bunch of FA's to test
t = 1421181937.70054 # time.time()
with mock.patch('swift.obj.diskfile.time') as mock_time:
                    # since (a) we are using a fixed time here to create
                    # frags which correspond to all the hardcoded hashes and
                    # (b) the EC diskfile will delete its .data file right
                    # after creating it if it has expired, use this horrible
                    # hack to prevent the reclaim from happening
mock_time.time.return_value = 0.0
_create_frag_archives(self.ec_policy, self.ec_obj_path,
self.ec_local_dev['id'], 0)
_create_frag_archives(self.ec_policy, self.ec_obj_path,
self.ec_local_dev['id'], 1)
break
break
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def _create_diskfile(self, policy=None, part=0, object_name='o',
frag_index=0, timestamp=None, test_data=None,
commit=True):
policy = policy or self.policy
df_mgr = self.reconstructor._df_router[policy]
df = df_mgr.get_diskfile('sda1', part, 'a', 'c', object_name,
policy=policy)
timestamp = timestamp or utils.Timestamp.now()
test_data = test_data or b'test data'
write_diskfile(df, timestamp, data=test_data, frag_index=frag_index,
commit=commit, legacy_durable=self.legacy_durable)
return df
def assert_expected_jobs(self, part_num, jobs):
# the dict diffs can be pretty big
self.maxDiff = 2048
for job in jobs:
del job['path']
del job['policy']
if 'local_index' in job:
del job['local_index']
job['suffixes'].sort()
expected = []
# part num 0
expected.append(
[{
'sync_to': [{
'index': 2,
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.2',
'region': 1,
'port': 6200,
'replication_ip': '10.0.0.2',
'device': 'sda2',
'id': 2,
'weight': 1.0,
}],
'job_type': object_reconstructor.REVERT,
'suffixes': ['061'],
'partition': 0,
'frag_index': 2,
'primary_frag_index': 1,
'device': 'sda1',
'local_dev': {
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '10.0.0.1',
'device': 'sda1',
'port': 6200,
'weight': 1.0,
},
'hashes': {
'061': {
None: '85b02a5283704292a511078a5c483da5',
2: '0e6e8d48d801dc89fd31904ae3b31229',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
'3c1': {
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
},
}, {
'sync_to': [{
'index': 0,
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.0',
'region': 1,
'port': 6200,
'replication_ip': '10.0.0.0',
'device': 'sda0',
'id': 0,
'weight': 1.0,
}, {
'index': 2,
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.2',
'region': 1,
'port': 6200,
'replication_ip': '10.0.0.2',
'device': 'sda2',
'id': 2,
'weight': 1.0,
}, {
'index': 3,
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.3',
'region': 1,
'port': 6200,
'replication_ip': '10.0.0.3',
'device': 'sda3',
'id': 3,
'weight': 1.0,
}],
'job_type': object_reconstructor.SYNC,
'sync_diskfile_builder': self.reconstructor.reconstruct_fa,
'suffixes': ['061', '3c1'],
'partition': 0,
'frag_index': 1,
'primary_frag_index': 1,
'device': 'sda1',
'local_dev': {
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '10.0.0.1',
'device': 'sda1',
'port': 6200,
'weight': 1.0,
},
'hashes':
{
'061': {
None: '85b02a5283704292a511078a5c483da5',
2: '0e6e8d48d801dc89fd31904ae3b31229',
1: '0e6e8d48d801dc89fd31904ae3b31229'
},
'3c1': {
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
},
}]
)
# part num 1
expected.append(
[{
'sync_to': [{
'index': 1,
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.2',
'region': 1,
'port': 6200,
'replication_ip': '10.0.0.2',
'device': 'sda6',
'id': 6,
'weight': 1.0,
}],
'job_type': object_reconstructor.REVERT,
'suffixes': ['061', '3c1'],
'partition': 1,
'frag_index': 1,
'primary_frag_index': 4,
'device': 'sda1',
'local_dev': {
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '10.0.0.1',
'device': 'sda1',
'port': 6200,
'weight': 1.0,
},
'hashes':
{
'061': {
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
'3c1': {
0: '0e6e8d48d801dc89fd31904ae3b31229',
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
},
}, {
'sync_to': [{
'index': 0,
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.1',
'region': 1,
'port': 6200,
'replication_ip': '10.0.0.1',
'device': 'sda5',
'id': 5,
'weight': 1.0,
}],
'job_type': object_reconstructor.REVERT,
'suffixes': ['3c1'],
'partition': 1,
'frag_index': 0,
'primary_frag_index': 4,
'device': 'sda1',
'local_dev': {
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '10.0.0.1',
'device': 'sda1',
'port': 6200,
'weight': 1.0,
},
'hashes': {
'061': {
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
'3c1': {
0: '0e6e8d48d801dc89fd31904ae3b31229',
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
},
}, {
'sync_to': [{
'index': 3,
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.0',
'region': 1,
'port': 6200,
'replication_ip': '10.0.0.0',
'device': 'sda0',
'id': 0,
'weight': 1.0,
}, {
'index': 0,
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.1',
'region': 1,
'port': 6200,
'replication_ip': '10.0.0.1',
'device': 'sda5',
'id': 5,
'weight': 1.0,
}, {
'index': 1,
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.2',
'region': 1,
'port': 6200,
'replication_ip': '10.0.0.2',
'device': 'sda6',
'id': 6,
'weight': 1.0,
}],
'job_type': object_reconstructor.SYNC,
'sync_diskfile_builder': self.reconstructor.reconstruct_fa,
'suffixes': [],
'partition': 1,
'frag_index': 4,
'primary_frag_index': 4,
'device': 'sda1',
'local_dev': {
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '10.0.0.1',
'device': 'sda1',
'port': 6200,
'weight': 1.0,
},
'hashes': {
'061': {
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
'3c1': {
0: '0e6e8d48d801dc89fd31904ae3b31229',
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
},
}]
)
# part num 2
expected.append(
[{
'sync_to': [{
'index': 0,
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.2',
'region': 1,
'port': 6200,
'replication_ip': '10.0.0.2',
'device': 'sda2',
'id': 2,
'weight': 1.0,
}],
'job_type': object_reconstructor.REVERT,
'suffixes': ['061'],
'partition': 2,
'frag_index': 0,
'primary_frag_index': None,
'device': 'sda1',
'local_dev': {
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '10.0.0.1',
'device': 'sda1',
'port': 6200,
'weight': 1.0,
},
'hashes': {
'061': {
0: '0e6e8d48d801dc89fd31904ae3b31229',
None: '85b02a5283704292a511078a5c483da5'
},
'3c1': {
None: '85b02a5283704292a511078a5c483da5',
2: '0e6e8d48d801dc89fd31904ae3b31229'
},
},
}, {
'sync_to': [{
'index': 2,
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.0',
'region': 1,
'port': 6200,
'replication_ip': '10.0.0.0',
'device': 'sda4',
'id': 4,
'weight': 1.0,
}],
'job_type': object_reconstructor.REVERT,
'suffixes': ['3c1'],
'partition': 2,
'frag_index': 2,
'primary_frag_index': None,
'device': 'sda1',
'local_dev': {
'replication_port': 6200,
'zone': 1,
'ip': '10.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '10.0.0.1',
'device': 'sda1',
'port': 6200,
'weight': 1.0,
},
'hashes': {
'061': {
0: '0e6e8d48d801dc89fd31904ae3b31229',
None: '85b02a5283704292a511078a5c483da5'
},
'3c1': {
None: '85b02a5283704292a511078a5c483da5',
2: '0e6e8d48d801dc89fd31904ae3b31229'
},
},
}]
)
def check_jobs(part_num):
try:
expected_jobs = expected[int(part_num)]
except (IndexError, ValueError):
self.fail('Unknown part number %r' % part_num)
expected_by_part_frag_index = dict(
((j['partition'], j['frag_index']), j) for j in expected_jobs)
unexpected_jobs = []
for job in jobs:
job_key = (job['partition'], job['frag_index'])
if job_key in expected_by_part_frag_index:
self.assertEqual(job, expected_by_part_frag_index[job_key])
else:
unexpected_jobs.append(job)
if unexpected_jobs:
self.fail(
'Unexpected jobs for frags %r in part num %s - '
'expected jobs for frags %r' % (
[j['frag_index'] for j in unexpected_jobs], part_num,
[k[1] for k in expected_by_part_frag_index]))
for expected_job in expected_jobs:
if expected_job in jobs:
jobs.remove(expected_job)
self.assertFalse(jobs) # that should be all of them
check_jobs(part_num)
def _run_once(self, http_count, extra_devices, override_devices=None):
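        # Plant tombstone-only hash dirs on the extra devices, add those
        # devices to the policy's ring, then drive a single run_once() pass
        # with process_job stubbed and stats reporting interlocked with job
        # processing (see fake_sleep/fake_process below).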
id_counter = itertools.count(
max(d['id'] for d in self.policy.object_ring.devs) + 1)
for device, parts in extra_devices.items():
device_path = os.path.join(self.devices, device)
os.mkdir(device_path)
for part in range(parts):
hash_path = os.path.join(
device_path, 'objects-1', str(part), 'abc', 'hash')
os.makedirs(hash_path)
tombstone_file = utils.Timestamp(time.time()).internal + '.ts'
with open(os.path.join(hash_path, tombstone_file), 'w'):
pass
# use sda1 as a base to make is_local happy
new_device = dict(self.policy.object_ring.devs[1])
new_device['device'] = device
new_device['id'] = next(id_counter)
self.policy.object_ring.devs.append(new_device)
self.reconstructor.stats_interval = object()
can_process = threading.Event()
can_do_stats = threading.Event()
can_do_stats.set()
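        # stats_interval is replaced with a unique sentinel so fake_sleep can
        # intercept only the stats reporter's sleep; the two events make the
        # stats line and each process_job call strictly alternate.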
def fake_sleep(secs=0):
if secs is not self.reconstructor.stats_interval:
return sleep(secs)
can_do_stats.wait()
can_do_stats.clear()
can_process.set()
def fake_process(job):
can_process.wait()
can_process.clear()
can_do_stats.set()
self.reconstructor.process_job = fake_process
with mock_ssync_sender(), mock.patch(
'swift.obj.reconstructor.sleep', fake_sleep):
self.reconstructor.run_once(devices=override_devices)
def test_run_once(self):
        # sda1 (3 parts) is already created in setUp
extra_devices = {
'sdb1': 4,
'sdc1': 1,
'sdd1': 0,
}
with Timeout(60):
self._run_once(32, extra_devices)
stats_lines = set()
for line in self.logger.get_lines_for_level('info'):
if 'reconstructed in' not in line:
continue
stat_line = line.split('reconstructed', 1)[0].strip()
stats_lines.add(stat_line)
acceptable = set([
'2/8 (25.00%) partitions',
'3/8 (37.50%) partitions',
'4/8 (50.00%) partitions',
'5/8 (62.50%) partitions',
'6/8 (75.00%) partitions',
'7/8 (87.50%) partitions',
'8/8 (100.00%) partitions',
])
matched = stats_lines & acceptable
self.assertEqual(matched, acceptable,
'missing some expected acceptable:\n%s' % (
'\n'.join(sorted(acceptable - matched))))
self.assertEqual(self.reconstructor.reconstruction_part_count, 8)
self.assertEqual(self.reconstructor.part_count, 8)
def test_run_once_override_devices(self):
        # sda1 (3 parts) is already created in setUp
extra_devices = {
'sdb1': 4,
'sdc1': 1,
'sdd1': 0,
}
with Timeout(60):
self._run_once(3, extra_devices, 'sdc1')
stats_lines = set()
for line in self.logger.get_lines_for_level('info'):
if 'reconstructed in' not in line:
continue
stat_line = line.split('reconstructed', 1)[0].strip()
stats_lines.add(stat_line)
acceptable = set([
'1/1 (100.00%) partitions',
])
matched = stats_lines & acceptable
self.assertEqual(matched, acceptable,
'missing some expected acceptable:\n%s' % (
'\n'.join(sorted(acceptable - matched))))
self.assertEqual(self.reconstructor.reconstruction_part_count, 1)
self.assertEqual(self.reconstructor.part_count, 1)
def test_get_response(self):
part = self.part_nums[0]
node = self.policy.object_ring.get_part_nodes(int(part))[0]
# separate replication network
node['replication_port'] = node['port'] + 1000
def do_test(stat_code):
with mocked_http_conn(stat_code) as mock_conn:
resp = self.reconstructor._get_response(
node, self.policy, part, path='/nada', headers={})
self.assertEqual(mock_conn.requests, [{
'ssl': False,
'ip': node['replication_ip'],
'port': node['replication_port'],
'method': 'GET',
'path': '/sda0/%s/nada' % part,
'qs': None,
'headers': {},
}])
return resp
for status in (200, 400, 404, 503):
resp = do_test(status)
self.assertEqual(status, resp.status)
resp = do_test(Exception())
self.assertIsNone(resp)
# exception should result in error logs
for line in self.logger.get_lines_for_level('error'):
self.assertIn('Trying to GET', line)
self.logger._clear()
# Timeout also should result in error logs
resp = do_test(Timeout())
self.assertIsNone(resp)
for line in self.logger.get_lines_for_level('error'):
self.assertIn('Trying to GET', line)
# sanity Timeout has extra message in the error log
self.assertIn('Timeout', line)
self.logger.clear()
def test_reconstructor_skips_bogus_partition_dirs(self):
# A directory in the wrong place shouldn't crash the reconstructor
self.reconstructor._reset_stats()
rmtree(self.objects_1)
os.mkdir(self.objects_1)
os.mkdir(os.path.join(self.objects_1, "burrito"))
jobs = []
for part_info in self.reconstructor.collect_parts():
jobs += self.reconstructor.build_reconstruction_jobs(part_info)
self.assertFalse(jobs)
def test_check_ring(self):
testring = tempfile.mkdtemp()
_create_test_rings(testring)
obj_ring = ring.Ring(testring, ring_name='object') # noqa
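        # check_ring() should only report a ring change (return False) once
        # the next_check interval has elapsed *and* the ring file's mtime
        # has changed; the cases below exercise each combination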
self.assertTrue(self.reconstructor.check_ring(obj_ring))
orig_check = self.reconstructor.next_check
self.reconstructor.next_check = orig_check - 30
self.assertTrue(self.reconstructor.check_ring(obj_ring))
self.reconstructor.next_check = orig_check
orig_ring_time = obj_ring._mtime
obj_ring._mtime = orig_ring_time - 30
self.assertTrue(self.reconstructor.check_ring(obj_ring))
self.reconstructor.next_check = orig_check - 30
self.assertFalse(self.reconstructor.check_ring(obj_ring))
rmtree(testring, ignore_errors=1)
def test_reconstruct_check_ring(self):
# test reconstruct logs info when check_ring is false and that
# there are no jobs built
objects_2 = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(POLICIES[2]))
os.mkdir(objects_2)
for part in ['0', '1', '2']:
os.mkdir(os.path.join(objects_2, part))
with mock.patch.object(self.reconstructor, 'process_job') as mock_pj, \
mock.patch(
'swift.obj.reconstructor.ObjectReconstructor.check_ring',
side_effect=lambda ring: ring is not POLICIES[1].object_ring):
self.reconstructor.reconstruct()
msgs = self.logger.get_lines_for_level('info')
self.assertEqual(1, msgs.count(
'Ring change detected for policy 1 (one). Aborting '
'current reconstruction pass for this policy.'), msgs)
self.assertEqual(
[call[1][0]['job_type'] for call in mock_pj.mock_calls],
['sync_only'] * 2)
self.assertEqual(
[call[1][0]['policy'] for call in mock_pj.mock_calls],
[POLICIES[2]] * 2)
# partition 2 doesn't belong here and doesn't have data,
# so it just gets cleaned up
self.assertEqual(
{call[1][0]['partition'] for call in mock_pj.mock_calls},
{0, 1})
def test_build_reconstruction_jobs(self):
self.reconstructor._reset_stats()
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertTrue(jobs[0]['job_type'] in
(object_reconstructor.SYNC,
object_reconstructor.REVERT))
self.assert_expected_jobs(part_info['partition'], jobs)
def test_handoffs_only(self):
self.reconstructor.handoffs_only = True
found_job_types = set()
def fake_process_job(job):
# increment failure counter
self.reconstructor.handoffs_remaining += 1
found_job_types.add(job['job_type'])
self.reconstructor.process_job = fake_process_job
_orig_build_jobs = self.reconstructor.build_reconstruction_jobs
built_jobs = []
def capture_jobs(part_info):
jobs = _orig_build_jobs(part_info)
built_jobs.append((part_info, jobs))
return jobs
with mock.patch.object(self.reconstructor, 'build_reconstruction_jobs',
capture_jobs):
self.reconstructor.reconstruct()
# only revert jobs
found = [(part_info['partition'], set(
j['job_type'] for j in jobs))
for part_info, jobs in built_jobs]
self.assertEqual([
# partition, job_types
(2, {'sync_revert'}),
], found)
self.assertEqual(found_job_types, {object_reconstructor.REVERT})
# but failures keep handoffs remaining
msgs = self.logger.get_lines_for_level('info')
self.assertIn('Next pass will continue to revert handoffs', msgs[-1])
self.logger._clear()
found_job_types = set()
def fake_process_job(job):
# success does not increment failure counter
found_job_types.add(job['job_type'])
self.reconstructor.process_job = fake_process_job
# only revert jobs ... but all handoffs cleared out successfully
self.reconstructor.reconstruct()
self.assertEqual(found_job_types, {object_reconstructor.REVERT})
# it's time to turn off handoffs_only
msgs = self.logger.get_lines_for_level('warning')
self.assertIn('You should disable handoffs_only', msgs[-1])
def test_get_partners(self):
expected = (
# node_index, part_nodes => partners
(0, [0, 1, 2, 3], [3, 1, 2]),
(0, [2, 3, 1, 0], [0, 3, 1]),
(0, [0, 1, 2, 3, 4], [4, 1, 2]),
(0, [0, 1, 2, 3, 4, 5], [5, 1, 3]),
(1, [0, 1, 2, 3, 4, 5], [0, 2, 4]),
(2, [0, 1, 2, 3, 4, 5], [1, 3, 5]),
(3, [0, 1, 2, 3, 4, 5], [2, 4, 0]),
(4, [0, 1, 2, 3, 4, 5], [3, 5, 1]),
(5, [0, 1, 2, 3, 4, 5], [4, 0, 2]),
(5, [1, 4, 0, 2, 3, 5], [3, 1, 0]),
)
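        # i.e. the partners for a given node_index are the primaries either
        # side of it plus the primary roughly half way round part_nodes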
failures = []
for frag_index, part_nodes, partners in expected:
sync_to = object_reconstructor._get_partners(
frag_index, part_nodes)
if partners != sync_to:
failures.append('Given nodes %r for index %s we expected '
'%r but got %r' % (
part_nodes, frag_index, partners, sync_to))
if failures:
failures.insert(0, 'Some test scenarios failed:')
self.fail('\n'.join(failures))
def test_iter_nodes_for_frag(self):
# no limit
self.reconstructor.rebuild_handoff_node_count = -1
policy = ECStoragePolicy(1, name='test', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=3)
policy.object_ring = FabricatedRing(replicas=7, devices=28)
primaries = policy.object_ring.get_part_nodes(0)
node = primaries[0]
nodes_for_frag = list(self.reconstructor._iter_nodes_for_frag(
policy, 0, node))
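        # with no limit, expect the primary itself followed by every handoff
        # whose handoff_index maps to the same backend frag index (every 7th
        # handoff in this 7-replica ring)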
expected = [0, 0, 7, 14]
self.assertEqual(expected, [n.get('index', n.get('handoff_index'))
for n in nodes_for_frag])
for node in nodes_for_frag:
self.assertEqual(0, node['backend_index'])
node = primaries[3]
nodes_for_frag = list(self.reconstructor._iter_nodes_for_frag(
policy, 0, node))
expected = [3, 3, 10, 17]
self.assertEqual(expected, [n.get('index', n.get('handoff_index'))
for n in nodes_for_frag])
for node in nodes_for_frag:
self.assertEqual(3, node['backend_index'])
node = primaries[-1]
nodes_for_frag = list(self.reconstructor._iter_nodes_for_frag(
policy, 0, node))
expected = [6, 6, 13, 20]
self.assertEqual(expected, [n.get('index', n.get('handoff_index'))
for n in nodes_for_frag])
for node in nodes_for_frag:
self.assertEqual(6, node['backend_index'])
# default limit is 2
self.reconstructor.rebuild_handoff_node_count = 2
node = primaries[0]
nodes_for_frag = list(self.reconstructor._iter_nodes_for_frag(
policy, 0, node))
expected = [0, 0, 7]
self.assertEqual(expected, [n.get('index', n.get('handoff_index'))
for n in nodes_for_frag])
for node in nodes_for_frag:
self.assertEqual(0, node['backend_index'])
# zero means only primaries
self.reconstructor.rebuild_handoff_node_count = 0
node = primaries[0]
nodes_for_frag = list(self.reconstructor._iter_nodes_for_frag(
policy, 0, node))
expected = [0]
self.assertEqual(expected, [n.get('index', n.get('handoff_index'))
for n in nodes_for_frag])
for node in nodes_for_frag:
self.assertEqual(0, node['backend_index'])
def test_collect_parts(self):
self.reconstructor._reset_stats()
parts = []
for part_info in self.reconstructor.collect_parts():
parts.append(part_info['partition'])
self.assertEqual(sorted(parts), [0, 1, 2])
def test_collect_parts_mkdirs_error(self):
def blowup_mkdirs(path):
raise OSError('Ow!')
self.reconstructor._reset_stats()
with mock.patch.object(object_reconstructor, 'mkdirs', blowup_mkdirs):
rmtree(self.objects_1, ignore_errors=1)
parts = []
for part_info in self.reconstructor.collect_parts():
parts.append(part_info['partition'])
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(
len(error_lines), 2,
'Expected exactly two errors, got %r' % error_lines)
log_args, log_kwargs = self.logger.log_dict['error'][0]
self.assertEqual(str(log_kwargs['exc_info'][1]), 'Ow!')
log_args, log_kwargs = self.logger.log_dict['error'][1]
self.assertEqual(str(log_kwargs['exc_info'][1]), 'Ow!')
def test_removes_zbf(self):
# suppress unmount warning
os.mkdir(os.path.join(self.devices, 'sda5'))
# After running xfs_repair, a partition directory could become a
# zero-byte file. If this happens, the reconstructor should clean it
# up, log something, and move on to the next partition.
# Surprise! Partition dir 1 is actually a zero-byte file.
pol_1_part_1_path = os.path.join(self.objects_1, '1')
rmtree(pol_1_part_1_path)
with open(pol_1_part_1_path, 'w'):
pass
self.assertTrue(os.path.isfile(pol_1_part_1_path)) # sanity check
self.reconstructor.process_job = lambda j: None
self.reconstructor.reconstruct()
self.assertFalse(os.path.exists(pol_1_part_1_path))
warnings = self.logger.get_lines_for_level('warning')
self.assertEqual(2, len(warnings))
# first warning is due to get_hashes failing to take lock on non-dir
self.assertIn(pol_1_part_1_path + '/hashes.pkl', warnings[0])
self.assertIn('unable to read', warnings[0].lower())
self.assertIn(pol_1_part_1_path, warnings[1])
self.assertIn('not a directory', warnings[1].lower())
def test_ignores_status_file(self):
# Following fd86d5a, the auditor will leave status files on each device
# until an audit can complete. The reconstructor should ignore these
@contextmanager
def status_files(*auditor_types):
status_paths = [os.path.join(self.objects_1,
'auditor_status_%s.json' % typ)
for typ in auditor_types]
for status_path in status_paths:
self.assertFalse(os.path.exists(status_path)) # sanity check
with open(status_path, 'w'):
pass
self.assertTrue(os.path.isfile(status_path)) # sanity check
try:
yield status_paths
finally:
for status_path in status_paths:
try:
os.unlink(status_path)
except OSError as e:
if e.errno != 2:
raise
# suppress unmount warning
os.mkdir(os.path.join(self.devices, 'sda5'))
        # since our collect_parts job is a generator that yields directly
        # into build_jobs and then spawns, it's safe to do the remove_files
        # without making reconstructor startup slow
with status_files('ALL', 'ZBF') as status_paths:
self.reconstructor._reset_stats()
for part_info in self.reconstructor.collect_parts():
self.assertNotIn(part_info['part_path'], status_paths)
warnings = self.logger.get_lines_for_level('warning')
self.assertEqual(0, len(warnings))
for status_path in status_paths:
self.assertTrue(os.path.exists(status_path))
def _make_fake_ssync(self, ssync_calls, fail_jobs=None):
"""
Replace SsyncSender with a thin Fake.
        :param ssync_calls: an empty list, used as a non-local accumulator;
                            all calls to ssync will be captured in it for
                            assertion by the caller.
:param fail_jobs: optional iter of dicts, any job passed into Fake that
matches a failure dict will return success == False.
"""
class _fake_ssync(object):
def __init__(self, daemon, node, job, suffixes,
include_non_durable=False, max_objects=0,
**kwargs):
# capture context and generate an available_map of objs
context = {}
context['node'] = node
context['job'] = job
context['suffixes'] = suffixes
context['max_objects'] = max_objects
self.suffixes = suffixes
self.daemon = daemon
self.job = job
frag_prefs = [] if include_non_durable else None
hash_gen = self.daemon._df_router[job['policy']].yield_hashes(
self.job['device'], self.job['partition'],
self.job['policy'], self.suffixes,
frag_index=self.job.get('frag_index'),
frag_prefs=frag_prefs)
self.available_map = {}
self.limited_by_max_objects = False
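                # emulate the sender's max_objects behaviour: stop filling
                # available_map once max_objects hashes have been yielded
                # and note whether the generator had more to give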
nlines = 0
for hash_, timestamps in hash_gen:
self.available_map[hash_] = timestamps
nlines += 1
if 0 < max_objects <= nlines:
break
for _ in hash_gen:
self.limited_by_max_objects = True
break
context['available_map'] = self.available_map
ssync_calls.append(context)
self.success = True
for failure in (fail_jobs or []):
if all(job.get(k) == v for (k, v) in failure.items()):
self.success = False
break
context['success'] = self.success
context['include_non_durable'] = include_non_durable
def __call__(self, *args, **kwargs):
return self.success, self.available_map if self.success else {}
return _fake_ssync
def test_delete_reverted(self):
# verify reconstructor deletes reverted frag indexes after ssync'ing
def visit_obj_dirs(context):
for suff in context['suffixes']:
suff_dir = os.path.join(
context['job']['path'], suff)
for root, dirs, files in os.walk(suff_dir):
for d in dirs:
dirpath = os.path.join(root, d)
files = os.listdir(dirpath)
yield dirpath, files
n_files = n_files_after = 0
# run reconstructor with delete function mocked out to check calls
ssync_calls = []
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)), \
mocked_http_conn(*[200] * 6, body=pickle.dumps({})), \
mock.patch.object(
self.reconstructor, 'delete_reverted_objs') as mock_delete:
self.reconstructor.reconstruct()
expected_calls = []
for context in ssync_calls:
if context['job']['job_type'] == REVERT:
self.assertTrue(context.get('include_non_durable'))
for dirpath, files in visit_obj_dirs(context):
# sanity check - expect some files to be in dir,
# may not be for the reverted frag index
self.assertTrue(files)
n_files += len(files)
self.assertEqual(context['job']['frag_index'],
context['node']['index'])
expected_calls.append(mock.call(context['job'],
context['available_map']))
else:
self.assertFalse(context.get('include_non_durable'))
self.assertEqual(0, context.get('max_objects'))
mock_delete.assert_has_calls(expected_calls, any_order=True)
        # N.B. in this next test sequence we actually delete files after
# revert, so the on-disk hashes can change. In partition 1, if the
# revert jobs (for frag_index 0 or 1) run before the sync job
# (frag_index 4) all suffixes will get removed and the sync job won't
        # have anything to ship to the remote (meaning there's no post-sync
# REPLICATE call). To keep the number of mocked_http_conn responses
# predictable we force a stable job order by mocking random's shuffle.
ssync_calls = []
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)), \
mocked_http_conn(*[200] * 6, body=pickle.dumps({})), \
mock.patch('swift.obj.reconstructor.random.shuffle'):
self.reconstructor.reconstruct()
for context in ssync_calls:
if context['job']['job_type'] == REVERT:
self.assertTrue(context.get('include_non_durable'))
data_file_tail = ('#%s.data'
% context['node']['index'])
for dirpath, files in visit_obj_dirs(context):
n_files_after += len(files)
for filename in files:
self.assertFalse(
filename.endswith(data_file_tail), filename)
else:
self.assertFalse(context.get('include_non_durable'))
self.assertEqual(0, context.get('max_objects'))
        # sanity check that some files were deleted
self.assertGreater(n_files, n_files_after)
def test_max_objects_per_revert_only_for_revert_jobs(self):
# verify max_objects_per_revert option is only passed to revert jobs
ssync_calls = []
conf = dict(self.conf, max_objects_per_revert=2)
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)), \
mocked_http_conn(*[200] * 6, body=pickle.dumps({})):
reconstructor = object_reconstructor.ObjectReconstructor(
conf, logger=self.logger)
reconstructor.reconstruct()
reverts = syncs = 0
for context in ssync_calls:
if context['job']['job_type'] == REVERT:
self.assertEqual(2, context.get('max_objects'))
reverts += 1
else:
self.assertEqual(0, context.get('max_objects'))
syncs += 1
self.assertGreater(reverts, 0)
self.assertGreater(syncs, 0)
def test_delete_reverted_nondurable(self):
        # verify reconstructor only deletes reverted nondurable fragments
        # older than commit_window
shutil.rmtree(self.ec_obj_path)
ips = utils.whataremyips(self.reconstructor.ring_ip)
local_devs = [dev for dev in self.ec_obj_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] ==
self.reconstructor.port]
partition = (local_devs[0]['id'] + 1) % 3
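        # use a partition for which this device is a handoff so the
        # diskfiles created below are reverted rather than synced in place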
# recent non-durable
df_recent = self._create_diskfile(
object_name='recent', part=partition, commit=False)
datafile_recent = df_recent.manager.cleanup_ondisk_files(
df_recent._datadir, frag_prefs=[])['data_file']
# older non-durable but with recent mtime
df_older = self._create_diskfile(
object_name='older', part=partition, commit=False,
timestamp=Timestamp(time.time() - 61))
datafile_older = df_older.manager.cleanup_ondisk_files(
df_older._datadir, frag_prefs=[])['data_file']
# durable
df_durable = self._create_diskfile(
object_name='durable', part=partition, commit=True)
datafile_durable = df_durable.manager.cleanup_ondisk_files(
df_durable._datadir, frag_prefs=[])['data_file']
self.assertTrue(os.path.exists(datafile_recent))
self.assertTrue(os.path.exists(datafile_older))
self.assertTrue(os.path.exists(datafile_durable))
ssync_calls = []
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)):
self.reconstructor.handoffs_only = True
self.reconstructor.reconstruct()
for context in ssync_calls:
self.assertEqual(REVERT, context['job']['job_type'])
self.assertTrue(context.get('include_non_durable'))
# neither nondurable should be removed yet with default commit_window
# because their mtimes are too recent
self.assertTrue(os.path.exists(datafile_recent))
self.assertTrue(os.path.exists(datafile_older))
# but durable is purged
self.assertFalse(os.path.exists(datafile_durable), datafile_durable)
ssync_calls = []
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)):
self.reconstructor.handoffs_only = True
# let the file get a little bit older and turn down the
# commit_window...
sleep(0.01)
df_older.manager.commit_window = 0.005
self.reconstructor.reconstruct()
for context in ssync_calls:
self.assertEqual(REVERT, context['job']['job_type'])
self.assertTrue(context.get('include_non_durable'))
# ...now the nondurables get purged
self.assertFalse(os.path.exists(datafile_recent))
self.assertFalse(os.path.exists(datafile_older))
def test_sync_old_nondurable_before_committed_non_zero_commit_window(self):
# verify that a *recently written* nondurable fragment survives being
# visited by the reconstructor, despite having timestamp older than
# reclaim_age
shutil.rmtree(self.ec_obj_path)
ips = utils.whataremyips(self.reconstructor.ring_ip)
local_devs = [dev for dev in self.ec_obj_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] ==
self.reconstructor.port]
partition = local_devs[0]['id']
# recently written, recent timestamp non-durable
ts_recent = Timestamp(time.time())
df_mgr = self.reconstructor._df_router[self.policy]
reclaim_age = df_mgr.reclaim_age
df_recent = self._create_diskfile(
object_name='recent', part=partition, commit=False,
timestamp=ts_recent, frag_index=4)
datafile_recent = df_recent.manager.cleanup_ondisk_files(
df_recent._datadir, frag_prefs=[])['data_file']
# recently written but old timestamp non-durable
ts_old = Timestamp(time.time() - reclaim_age - 1)
df_older = self._create_diskfile(
object_name='older', part=partition, commit=False,
timestamp=ts_old, frag_index=4)
datafile_older = df_older.manager.cleanup_ondisk_files(
df_older._datadir, frag_prefs=[])['data_file']
self.assertTrue(os.path.exists(datafile_recent))
self.assertTrue(os.path.exists(datafile_older))
# for this test we don't actually need to ssync anything, so pretend
# all suffixes are in sync
self.reconstructor._get_suffixes_to_sync = (
lambda job, node: ([], node))
df_mgr.commit_window = 1000 # avoid non-durables being reclaimed
self.reconstructor.reconstruct()
        # neither nondurable should be removed because their on-disk mtimes
        # fall within the (enlarged) commit_window
self.assertTrue(os.path.exists(datafile_recent))
self.assertTrue(os.path.exists(datafile_older))
# and we can still make the nondurables durable
df_recent.writer().commit(ts_recent)
self.assertTrue(os.path.exists(datafile_recent.replace('#4', '#4#d')))
df_older.writer().commit(ts_old)
self.assertTrue(os.path.exists(datafile_older.replace('#4', '#4#d')))
def test_sync_old_nondurable_before_committed_zero_commit_window(self):
# verify that a *recently written* nondurable fragment won't survive
# being visited by the reconstructor if its timestamp is older than
# reclaim_age and commit_window is zero; this test illustrates the
# potential data loss bug that commit_window addresses
shutil.rmtree(self.ec_obj_path)
ips = utils.whataremyips(self.reconstructor.ring_ip)
local_devs = [dev for dev in self.ec_obj_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] ==
self.reconstructor.port]
partition = local_devs[0]['id']
# recently written, recent timestamp non-durable
ts_recent = Timestamp(time.time())
df_mgr = self.reconstructor._df_router[self.policy]
reclaim_age = df_mgr.reclaim_age
df_recent = self._create_diskfile(
object_name='recent', part=partition, commit=False,
timestamp=ts_recent, frag_index=4)
datafile_recent = df_recent.manager.cleanup_ondisk_files(
df_recent._datadir, frag_prefs=[])['data_file']
# recently written but old timestamp non-durable
ts_old = Timestamp(time.time() - reclaim_age - 1)
df_older = self._create_diskfile(
object_name='older', part=partition, commit=False,
timestamp=ts_old, frag_index=4)
datafile_older = df_older.manager.cleanup_ondisk_files(
df_older._datadir, frag_prefs=[])['data_file']
self.assertTrue(os.path.exists(datafile_recent))
self.assertTrue(os.path.exists(datafile_older))
# for this test we don't actually need to ssync anything, so pretend
# all suffixes are in sync
self.reconstructor._get_suffixes_to_sync = (
lambda job, node: ([], node))
df_mgr.commit_window = 0
with mock.patch(
'swift.obj.diskfile.is_file_older') as mock_is_file_older:
self.reconstructor.reconstruct()
# older nondurable will be removed with commit_window = 0
self.assertTrue(os.path.exists(datafile_recent))
self.assertFalse(os.path.exists(datafile_older))
df_recent.writer().commit(ts_recent)
self.assertTrue(os.path.exists(datafile_recent.replace('#4', '#4#d')))
# ...and attempt to commit will fail :(
with self.assertRaises(DiskFileError):
df_older.writer().commit(ts_old)
# with zero commit_window the call to stat the file is not made
mock_is_file_older.assert_not_called()
def test_sync_old_nondurable_before_committed_past_commit_window(self):
# verify that a *not so recently written* nondurable fragment won't
# survive being visited by the reconstructor if its timestamp is older
# than reclaim_age
shutil.rmtree(self.ec_obj_path)
ips = utils.whataremyips(self.reconstructor.ring_ip)
local_devs = [dev for dev in self.ec_obj_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] ==
self.reconstructor.port]
partition = local_devs[0]['id']
# recently written, recent timestamp non-durable
ts_recent = Timestamp(time.time())
df_mgr = self.reconstructor._df_router[self.policy]
reclaim_age = df_mgr.reclaim_age
df_recent = self._create_diskfile(
object_name='recent', part=partition, commit=False,
timestamp=ts_recent, frag_index=4)
datafile_recent = df_recent.manager.cleanup_ondisk_files(
df_recent._datadir, frag_prefs=[])['data_file']
# recently written but old timestamp non-durable
ts_old = Timestamp(time.time() - reclaim_age - 1)
df_older = self._create_diskfile(
object_name='older', part=partition, commit=False,
timestamp=ts_old, frag_index=4)
datafile_older = df_older.manager.cleanup_ondisk_files(
df_older._datadir, frag_prefs=[])['data_file']
# pretend file was written more than commit_window seconds ago
now = time.time()
os.utime(datafile_older, (now - 60.1, now - 60.1))
self.assertTrue(os.path.exists(datafile_recent))
self.assertTrue(os.path.exists(datafile_older))
# for this test we don't actually need to ssync anything, so pretend
# all suffixes are in sync
self.reconstructor._get_suffixes_to_sync = (
lambda job, node: ([], node))
# leave commit_window at default of 60 seconds
self.reconstructor.reconstruct()
# older nondurable will be removed
self.assertTrue(os.path.exists(datafile_recent))
self.assertFalse(os.path.exists(datafile_older))
df_recent.writer().commit(ts_recent)
self.assertTrue(os.path.exists(datafile_recent.replace('#4', '#4#d')))
# ...and attempt to commit will fail :(
with self.assertRaises(DiskFileError):
df_older.writer().commit(ts_old)
def test_delete_reverted_max_objects_per_revert(self):
# verify reconstructor only deletes objects that were actually reverted
# when ssync is limited by max_objects_per_revert
shutil.rmtree(self.ec_obj_path)
ips = utils.whataremyips(self.reconstructor.ring_ip)
local_devs = [dev for dev in self.ec_obj_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] ==
self.reconstructor.port]
partition = (local_devs[0]['id'] + 1) % 3
# three durable objects
df_0 = self._create_diskfile(
object_name='zero', part=partition)
datafile_0 = df_0.manager.cleanup_ondisk_files(
df_0._datadir, frag_prefs=[])['data_file']
self.assertTrue(os.path.exists(datafile_0))
df_1 = self._create_diskfile(
object_name='one', part=partition)
datafile_1 = df_1.manager.cleanup_ondisk_files(
df_1._datadir, frag_prefs=[])['data_file']
self.assertTrue(os.path.exists(datafile_1))
df_2 = self._create_diskfile(
object_name='two', part=partition)
datafile_2 = df_2.manager.cleanup_ondisk_files(
df_2._datadir, frag_prefs=[])['data_file']
self.assertTrue(os.path.exists(datafile_2))
datafiles = [datafile_0, datafile_1, datafile_2]
actual_datafiles = [df for df in datafiles if os.path.exists(df)]
self.assertEqual(datafiles, actual_datafiles)
# only two objects will be sync'd and purged...
ssync_calls = []
conf = dict(self.conf, max_objects_per_revert=2, handoffs_only=True)
self.reconstructor = object_reconstructor.ObjectReconstructor(
conf, logger=self.logger)
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)):
self.reconstructor.reconstruct()
for context in ssync_calls:
self.assertEqual(REVERT, context['job']['job_type'])
self.assertEqual(2, context.get('max_objects'))
actual_datafiles = [df for df in datafiles if os.path.exists(df)]
self.assertEqual(1, len(actual_datafiles), actual_datafiles)
# handoff still reported as remaining
self.assertEqual(1, self.reconstructor.handoffs_remaining)
# ...until next reconstructor run which will sync and purge the last
# object; max_objects_per_revert == actual number of objects
ssync_calls = []
conf = dict(self.conf, max_objects_per_revert=1, handoffs_only=True)
self.reconstructor = object_reconstructor.ObjectReconstructor(
conf, logger=self.logger)
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)):
self.reconstructor.reconstruct()
for context in ssync_calls:
self.assertEqual(REVERT, context['job']['job_type'])
self.assertEqual(1, context.get('max_objects'))
actual_datafiles = [df for df in datafiles if os.path.exists(df)]
self.assertEqual([], actual_datafiles)
# handoff is no longer remaining
self.assertEqual(0, self.reconstructor.handoffs_remaining)
def test_no_delete_failed_revert(self):
# test will only process revert jobs
self.reconstructor.handoffs_only = True
# suppress unmount warning
os.mkdir(os.path.join(self.devices, 'sda5'))
captured_ssync = []
# fail all jobs on part 2 on sda1
fail_jobs = [
{'device': 'sda1', 'partition': 2},
]
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(
captured_ssync, fail_jobs=fail_jobs)), \
mocked_http_conn() as request_log:
self.reconstructor.reconstruct()
self.assertFalse(request_log.unexpected_requests)
# global setup has four revert jobs
self.assertEqual(len(captured_ssync), 2)
expected_ssync_calls = {
# device, part, frag_index: expected_occurrences
('sda1', 2, 2, True): 1,
('sda1', 2, 0, True): 1,
}
self.assertEqual(expected_ssync_calls, dict(collections.Counter(
(context['job']['device'],
context['job']['partition'],
context['job']['frag_index'],
context['include_non_durable'])
for context in captured_ssync
)))
# failed jobs don't sync suffixes
self.assertFalse(
self.logger.get_lines_for_level('warning'))
self.assertFalse(
self.logger.get_lines_for_level('error'))
# handoffs remaining and part exists
self.assertEqual(2, self.reconstructor.handoffs_remaining)
self.assertTrue(os.path.exists(self.parts_1['2']))
# again with no failures
captured_ssync = []
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(captured_ssync)):
self.reconstructor.reconstruct()
# same jobs
self.assertEqual(len(captured_ssync), 2)
self.assertFalse(
self.logger.get_lines_for_level('error'))
# handoffs are cleaned up
self.assertEqual(0, self.reconstructor.handoffs_remaining)
warning_msgs = self.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_msgs))
self.assertIn('no handoffs remaining', warning_msgs[0])
# need one more pass to cleanup the part dir
self.assertTrue(os.path.exists(self.parts_1['2']))
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync([])), \
mocked_http_conn() as request_log:
self.reconstructor.reconstruct()
self.assertFalse(os.path.exists(self.parts_1['2']))
def test_get_part_jobs(self):
# yeah, this test code expects a specific setup
self.assertEqual(len(self.part_nums), 3)
# OK, at this point we should have 4 loaded parts with one
jobs = []
for partition in os.listdir(self.ec_obj_path):
part_path = os.path.join(self.ec_obj_path, partition)
jobs = self.reconstructor._get_part_jobs(
self.ec_local_dev, part_path, int(partition), self.ec_policy)
self.assert_expected_jobs(partition, jobs)
def assertStatCount(self, stat_method, stat_prefix, expected_count):
count = count_stats(self.logger, stat_method, stat_prefix)
msg = 'expected %s != %s for %s %s' % (
expected_count, count, stat_method, stat_prefix)
self.assertEqual(expected_count, count, msg)
def test_delete_partition(self):
# part 2 is predefined to have all revert jobs
part_path = os.path.join(self.objects_1, '2')
self.assertTrue(os.access(part_path, os.F_OK))
ssync_calls = []
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)):
self.reconstructor.reconstruct(override_partitions=[2])
expected_ssync_calls = sorted([
(u'10.0.0.0', REVERT, 2, [u'3c1'], True),
(u'10.0.0.2', REVERT, 2, [u'061'], True),
])
self.assertEqual(expected_ssync_calls, sorted((
c['node']['ip'],
c['job']['job_type'],
c['job']['partition'],
c['suffixes'],
c.get('include_non_durable')
) for c in ssync_calls))
expected_stats = {
('increment', 'partition.delete.count.'): 2,
('timing_since', 'partition.delete.timing'): 2,
}
for stat_key, expected in expected_stats.items():
stat_method, stat_prefix = stat_key
self.assertStatCount(stat_method, stat_prefix, expected)
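        # seed hashes.pkl with a suffix that no longer exists on disk to
        # show that a do_listdir rehash drops it and leaves the partition
        # hashes empty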
stub_data = self.reconstructor._get_hashes(
'sda1', 2, self.policy, do_listdir=True)
stub_data.update({'7ca': {None: '8f19c38e1cf8e2390d4ca29051407ae3'}})
pickle_path = os.path.join(part_path, 'hashes.pkl')
with open(pickle_path, 'wb') as f:
pickle.dump(stub_data, f)
# part 2 should be totally empty
hash_gen = self.reconstructor._df_router[self.policy].yield_hashes(
'sda1', '2', self.policy, suffixes=stub_data.keys())
for hash_, ts in hash_gen:
self.fail('found %s : %s' % (hash_, ts))
new_hashes = self.reconstructor._get_hashes(
'sda1', 2, self.policy, do_listdir=True)
self.assertFalse(new_hashes)
# N.B. the partition directory is removed next pass
ssync_calls = []
with mocked_http_conn() as request_log:
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)):
self.reconstructor.reconstruct(override_partitions=[2])
self.assertEqual([], ssync_calls)
self.assertEqual([], request_log.requests)
self.assertFalse(os.access(part_path, os.F_OK))
def test_process_job_all_success(self):
rehash_per_job_type = {SYNC: 1, REVERT: 0}
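        # sync jobs make one rehash (REPLICATE) request per node in sync_to
        # after ssync'ing; revert jobs make none, so this determines how
        # many 200 responses to mock for each job below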
self.reconstructor._reset_stats()
with mock_ssync_sender():
found_jobs = []
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(
part_info)
found_jobs.extend(jobs)
for job in jobs:
self.logger.clear()
node_count = len(job['sync_to'])
rehash_count = node_count * rehash_per_job_type[
job['job_type']]
with mocked_http_conn(*[200] * rehash_count,
body=pickle.dumps({})):
self.reconstructor.process_job(job)
if job['job_type'] == object_reconstructor.REVERT:
self.assertStatCount('update_stats',
'suffix.hashes', 0)
else:
self.assertStatCount('update_stats',
'suffix.hashes', node_count)
self.assertStatCount('update_stats',
'suffix.syncs', node_count)
self.assertNotIn('error', self.logger.all_log_lines())
self.assertEqual(
dict(collections.Counter((job['device'], job['partition'],
job['frag_index'], job['job_type'])
for job in found_jobs)),
{('sda1', 0, 1, SYNC): 1,
('sda1', 0, 2, REVERT): 1,
('sda1', 1, 0, REVERT): 1,
('sda1', 1, 1, REVERT): 1,
('sda1', 1, 4, SYNC): 1,
('sda1', 2, 0, REVERT): 1,
('sda1', 2, 2, REVERT): 1})
self.assertEqual(self.reconstructor.suffix_sync, 12)
self.assertEqual(self.reconstructor.suffix_count, 12)
self.assertEqual(self.reconstructor.reconstruction_count, 7)
def test_process_job_all_insufficient_storage(self):
self.reconstructor._reset_stats()
with mock_ssync_sender():
with mocked_http_conn(*[507] * 10):
found_jobs = []
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(
part_info)
found_jobs.extend(jobs)
for job in jobs:
self.logger.clear()
self.reconstructor.process_job(job)
for line in self.logger.get_lines_for_level('error'):
self.assertIn('responded as unmounted', line)
self.assertEqual(0, count_stats(
self.logger, 'update_stats', 'suffix.hashes'))
self.assertEqual(0, count_stats(
self.logger, 'update_stats', 'suffix.syncs'))
self.assertEqual(
dict(collections.Counter((job['device'], job['partition'],
job['frag_index'], job['job_type'])
for job in found_jobs)),
{('sda1', 0, 1, SYNC): 1,
('sda1', 0, 2, REVERT): 1,
('sda1', 1, 0, REVERT): 1,
('sda1', 1, 1, REVERT): 1,
('sda1', 1, 4, SYNC): 1,
('sda1', 2, 0, REVERT): 1,
('sda1', 2, 2, REVERT): 1})
self.assertEqual(self.reconstructor.suffix_sync, 0)
self.assertEqual(self.reconstructor.suffix_count, 0)
self.assertEqual(self.reconstructor.reconstruction_count, 7)
def test_process_job_all_client_error(self):
self.reconstructor._reset_stats()
with mock_ssync_sender():
with mocked_http_conn(*[400] * 6):
found_jobs = []
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(
part_info)
found_jobs.extend(jobs)
for job in jobs:
self.logger.clear()
self.reconstructor.process_job(job)
for line in self.logger.get_lines_for_level('error'):
self.assertIn('Invalid response 400', line)
self.assertEqual(0, count_stats(
self.logger, 'update_stats', 'suffix.hashes'))
self.assertEqual(0, count_stats(
self.logger, 'update_stats', 'suffix.syncs'))
self.assertEqual(
dict(collections.Counter(
(job['device'], job['partition'], job['frag_index'])
for job in found_jobs)),
{('sda1', 0, 1): 1,
('sda1', 0, 2): 1,
('sda1', 1, 0): 1,
('sda1', 1, 1): 1,
('sda1', 1, 4): 1,
('sda1', 2, 0): 1,
('sda1', 2, 2): 1})
self.assertEqual(self.reconstructor.suffix_sync, 0)
self.assertEqual(self.reconstructor.suffix_count, 0)
self.assertEqual(self.reconstructor.reconstruction_count, 7)
def test_process_job_all_timeout(self):
self.reconstructor._reset_stats()
with mock_ssync_sender(), mocked_http_conn(*[Timeout()] * 6):
found_jobs = []
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(
part_info)
found_jobs.extend(jobs)
for job in jobs:
self.logger.clear()
self.reconstructor.process_job(job)
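                    # Timeout() has no seconds set, which the error message
                    # renders as 'Nones'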
for line in self.logger.get_lines_for_level('error'):
self.assertIn('Timeout (Nones)', line)
self.assertStatCount(
'update_stats', 'suffix.hashes', 0)
self.assertStatCount(
'update_stats', 'suffix.syncs', 0)
self.assertEqual(
dict(collections.Counter(
(job['device'], job['partition'], job['frag_index'])
for job in found_jobs)),
{('sda1', 0, 1): 1,
('sda1', 0, 2): 1,
('sda1', 1, 0): 1,
('sda1', 1, 1): 1,
('sda1', 1, 4): 1,
('sda1', 2, 0): 1,
('sda1', 2, 2): 1})
self.assertEqual(self.reconstructor.suffix_sync, 0)
self.assertEqual(self.reconstructor.suffix_count, 0)
self.assertEqual(self.reconstructor.reconstruction_count, 7)
def test_reconstructor_skipped_partpower_increase(self):
self.reconstructor._reset_stats()
_create_test_rings(self.testdir, 10)
# Enforce re-reading the EC ring
POLICIES[1].object_ring = ring.Ring(self.testdir, ring_name='object-1')
self.reconstructor.reconstruct()
self.assertEqual(0, self.reconstructor.reconstruction_count)
warnings = self.logger.get_lines_for_level('warning')
self.assertIn(
"next_part_power set in policy 'one'. Skipping", warnings)
class TestGlobalSetupObjectReconstructorLegacyDurable(
TestGlobalSetupObjectReconstructor):
# Tests for reconstructor using real objects in test partition directories.
legacy_durable = True
@patch_policies(with_ec_default=True)
class TestWorkerReconstructor(unittest.TestCase):
maxDiff = None
def setUp(self):
super(TestWorkerReconstructor, self).setUp()
self.logger = debug_logger()
self.testdir = tempfile.mkdtemp()
self.recon_cache_path = os.path.join(self.testdir, 'recon')
self.rcache = os.path.join(self.recon_cache_path, 'object.recon')
# dump_recon_cache expects recon_cache_path to exist
os.mkdir(self.recon_cache_path)
def tearDown(self):
super(TestWorkerReconstructor, self).tearDown()
shutil.rmtree(self.testdir)
def test_no_workers_by_default(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{}, logger=self.logger)
self.assertEqual(0, reconstructor.reconstructor_workers)
self.assertEqual(0, len(list(reconstructor.get_worker_args())))
def test_bad_value_workers(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '-1'}, logger=self.logger)
self.assertEqual(-1, reconstructor.reconstructor_workers)
self.assertEqual(0, len(list(reconstructor.get_worker_args())))
def test_workers_with_no_devices(self):
def do_test(num_workers):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': num_workers}, logger=self.logger)
self.assertEqual(num_workers, reconstructor.reconstructor_workers)
self.assertEqual(1, len(list(reconstructor.get_worker_args())))
self.assertEqual([
{'override_partitions': [], 'override_devices': [],
'multiprocess_worker_index': 0},
], list(reconstructor.get_worker_args()))
do_test(1)
do_test(10)
def test_workers_with_devices_and_no_valid_overrides(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '2'}, logger=self.logger)
reconstructor.get_local_devices = lambda: ['sdb', 'sdc']
self.assertEqual(2, reconstructor.reconstructor_workers)
# N.B. sdz is not in local_devices so there are no devices to process
# but still expect a single worker process
worker_args = list(reconstructor.get_worker_args(
once=True, devices='sdz'))
self.assertEqual(1, len(worker_args))
self.assertEqual([{'override_partitions': [],
'override_devices': ['sdz'],
'multiprocess_worker_index': 0}],
worker_args)
# overrides are ignored in forever mode
worker_args = list(reconstructor.get_worker_args(
once=False, devices='sdz'))
self.assertEqual(2, len(worker_args))
self.assertEqual([
{'override_partitions': [], 'override_devices': ['sdb'],
'multiprocess_worker_index': 0},
{'override_partitions': [], 'override_devices': ['sdc'],
'multiprocess_worker_index': 1},
], worker_args)
def test_workers_with_devices(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '2'}, logger=self.logger)
reconstructor.get_local_devices = lambda: ['sdb', 'sdc']
self.assertEqual(2, reconstructor.reconstructor_workers)
self.assertEqual(2, len(list(reconstructor.get_worker_args())))
expected = [
{'override_partitions': [], 'override_devices': ['sdb'],
'multiprocess_worker_index': 0},
{'override_partitions': [], 'override_devices': ['sdc'],
'multiprocess_worker_index': 1},
]
worker_args = list(reconstructor.get_worker_args(once=False))
self.assertEqual(2, len(worker_args))
self.assertEqual(expected, worker_args)
worker_args = list(reconstructor.get_worker_args(once=True))
self.assertEqual(2, len(worker_args))
self.assertEqual(expected, worker_args)
def test_workers_with_devices_and_overrides(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '2'}, logger=self.logger)
reconstructor.get_local_devices = lambda: ['sdb', 'sdc']
self.assertEqual(2, reconstructor.reconstructor_workers)
# check we don't get more workers than override devices...
# N.B. sdz is not in local_devices so should be ignored for the
# purposes of generating workers
worker_args = list(reconstructor.get_worker_args(
once=True, devices='sdb,sdz', partitions='99,333'))
self.assertEqual(1, len(worker_args))
self.assertEqual(
[{'override_partitions': [99, 333], 'override_devices': ['sdb'],
'multiprocess_worker_index': 0}],
worker_args)
# overrides are ignored in forever mode
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '2'}, logger=self.logger)
reconstructor.get_local_devices = lambda: ['sdb', 'sdc']
worker_args = list(reconstructor.get_worker_args(
once=False, devices='sdb,sdz', partitions='99,333'))
self.assertEqual([
{'override_partitions': [], 'override_devices': ['sdb'],
'multiprocess_worker_index': 0},
{'override_partitions': [], 'override_devices': ['sdc'],
'multiprocess_worker_index': 1}
], worker_args)
def test_workers_with_lots_of_devices(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '2'}, logger=self.logger)
reconstructor.get_local_devices = lambda: [
'sdb', 'sdc', 'sdd', 'sde', 'sdf']
self.assertEqual(2, reconstructor.reconstructor_workers)
self.assertEqual(2, len(list(reconstructor.get_worker_args())))
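        # devices are dealt out round-robin across workers, so worker 0
        # gets sdb, sdd, sdf and worker 1 gets sdc, sde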
self.assertEqual([
{'override_partitions': [],
'override_devices': ['sdb', 'sdd', 'sdf'],
'multiprocess_worker_index': 0},
{'override_partitions': [],
'override_devices': ['sdc', 'sde'],
'multiprocess_worker_index': 1},
], list(reconstructor.get_worker_args()))
def test_workers_with_lots_of_devices_and_overrides(self):
# check that override devices get distributed across workers
# in similar fashion to all devices
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '2'}, logger=self.logger)
reconstructor.get_local_devices = lambda: [
'sdb', 'sdc', 'sdd', 'sde', 'sdf']
self.assertEqual(2, reconstructor.reconstructor_workers)
worker_args = list(reconstructor.get_worker_args(
once=True, devices='sdb,sdd,sdf', partitions='99,333'))
# 3 devices to operate on, 2 workers -> one worker gets two devices
# and the other worker just gets one
self.assertEqual([{
'override_partitions': [99, 333],
'override_devices': ['sdb', 'sdf'],
'multiprocess_worker_index': 0,
}, {
'override_partitions': [99, 333],
'override_devices': ['sdd'],
'multiprocess_worker_index': 1,
}], worker_args)
# with 4 override devices, expect 2 per worker
worker_args = list(reconstructor.get_worker_args(
once=True, devices='sdb,sdc,sdd,sdf', partitions='99,333'))
self.assertEqual(2, len(worker_args))
self.assertEqual([
{'override_partitions': [99, 333],
'override_devices': ['sdb', 'sdd'],
'multiprocess_worker_index': 0},
{'override_partitions': [99, 333],
'override_devices': ['sdc', 'sdf'],
'multiprocess_worker_index': 1},
], worker_args)
def test_workers_with_lots_of_workers(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '10'}, logger=self.logger)
reconstructor.get_local_devices = lambda: ['sdb', 'sdc']
self.assertEqual(10, reconstructor.reconstructor_workers)
self.assertEqual(2, len(list(reconstructor.get_worker_args())))
self.assertEqual([
{'override_partitions': [], 'override_devices': ['sdb'],
'multiprocess_worker_index': 0},
{'override_partitions': [], 'override_devices': ['sdc'],
'multiprocess_worker_index': 1},
], list(reconstructor.get_worker_args()))
def test_workers_with_lots_of_workers_and_devices(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'reconstructor_workers': '10'}, logger=self.logger)
reconstructor.get_local_devices = lambda: [
'sdb', 'sdc', 'sdd', 'sde', 'sdf']
self.assertEqual(10, reconstructor.reconstructor_workers)
self.assertEqual(5, len(list(reconstructor.get_worker_args())))
self.assertEqual([
{'override_partitions': [], 'override_devices': ['sdb'],
'multiprocess_worker_index': 0},
{'override_partitions': [], 'override_devices': ['sdc'],
'multiprocess_worker_index': 1},
{'override_partitions': [], 'override_devices': ['sdd'],
'multiprocess_worker_index': 2},
{'override_partitions': [], 'override_devices': ['sde'],
'multiprocess_worker_index': 3},
{'override_partitions': [], 'override_devices': ['sdf'],
'multiprocess_worker_index': 4},
], list(reconstructor.get_worker_args()))
def test_workers_with_some_workers_and_devices(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{}, logger=self.logger)
reconstructor.get_local_devices = lambda: [
'd%s' % (i + 1) for i in range(21)]
# With more devices than workers, the work is spread out as evenly
# as we can manage. When number-of-devices is a multiple of
# number-of-workers, every worker has the same number of devices to
# operate on.
reconstructor.reconstructor_workers = 7
worker_args = list(reconstructor.get_worker_args())
self.assertEqual([len(a['override_devices']) for a in worker_args],
[3] * 7)
# When number-of-devices is not a multiple of number-of-workers,
# device counts differ by at most 1.
reconstructor.reconstructor_workers = 5
worker_args = list(reconstructor.get_worker_args())
self.assertEqual(
sorted([len(a['override_devices']) for a in worker_args]),
[4, 4, 4, 4, 5])
# With more workers than devices, we don't create useless workers.
# We'll only make one per device.
reconstructor.reconstructor_workers = 22
worker_args = list(reconstructor.get_worker_args())
self.assertEqual(
[len(a['override_devices']) for a in worker_args],
[1] * 21)
# This is true even if we have far more workers than devices.
reconstructor.reconstructor_workers = 2 ** 16
worker_args = list(reconstructor.get_worker_args())
self.assertEqual(
[len(a['override_devices']) for a in worker_args],
[1] * 21)
# Spot check one full result for sanity's sake
reconstructor.reconstructor_workers = 11
self.assertEqual([
{'override_partitions': [], 'override_devices': ['d1', 'd12'],
'multiprocess_worker_index': 0},
{'override_partitions': [], 'override_devices': ['d2', 'd13'],
'multiprocess_worker_index': 1},
{'override_partitions': [], 'override_devices': ['d3', 'd14'],
'multiprocess_worker_index': 2},
{'override_partitions': [], 'override_devices': ['d4', 'd15'],
'multiprocess_worker_index': 3},
{'override_partitions': [], 'override_devices': ['d5', 'd16'],
'multiprocess_worker_index': 4},
{'override_partitions': [], 'override_devices': ['d6', 'd17'],
'multiprocess_worker_index': 5},
{'override_partitions': [], 'override_devices': ['d7', 'd18'],
'multiprocess_worker_index': 6},
{'override_partitions': [], 'override_devices': ['d8', 'd19'],
'multiprocess_worker_index': 7},
{'override_partitions': [], 'override_devices': ['d9', 'd20'],
'multiprocess_worker_index': 8},
{'override_partitions': [], 'override_devices': ['d10', 'd21'],
'multiprocess_worker_index': 9},
{'override_partitions': [], 'override_devices': ['d11'],
'multiprocess_worker_index': 10},
], list(reconstructor.get_worker_args()))
def test_next_rcache_update_configured_with_stats_interval(self):
now = time.time()
with mock.patch('swift.obj.reconstructor.time.time', return_value=now):
reconstructor = object_reconstructor.ObjectReconstructor(
{}, logger=self.logger)
self.assertEqual(now + 300, reconstructor._next_rcache_update)
reconstructor = object_reconstructor.ObjectReconstructor(
{'stats_interval': '30'}, logger=self.logger)
self.assertEqual(now + 30, reconstructor._next_rcache_update)
def test_is_healthy_rcache_update_waits_for_next_update(self):
now = time.time()
with mock.patch('swift.obj.reconstructor.time.time', return_value=now):
reconstructor = object_reconstructor.ObjectReconstructor(
{'recon_cache_path': self.recon_cache_path},
logger=self.logger)
# file does not exist to start
self.assertFalse(os.path.exists(self.rcache))
with mock.patch('swift.obj.reconstructor.os.path.getmtime',
return_value=10):
self.assertTrue(reconstructor.is_healthy())
# ... and isn't created until _next_rcache_update
self.assertFalse(os.path.exists(self.rcache))
# ... but if we wait 5 mins (by default)
orig_next_update = reconstructor._next_rcache_update
with mock.patch('swift.obj.reconstructor.time.time',
return_value=now + 301):
with mock.patch('swift.obj.reconstructor.os.path.getmtime',
return_value=11):
self.assertTrue(reconstructor.is_healthy())
self.assertGreater(reconstructor._next_rcache_update, orig_next_update)
# ... it will be created
self.assertTrue(os.path.exists(self.rcache))
with open(self.rcache) as f:
data = json.load(f)
# and empty
self.assertEqual({}, data)
def test_is_healthy(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'recon_cache_path': self.recon_cache_path},
logger=self.logger)
with mock.patch('swift.obj.reconstructor.os.path.getmtime',
return_value=10):
self.assertTrue(reconstructor.is_healthy())
reconstructor.get_local_devices = lambda: {
'sdb%d' % p for p in reconstructor.policies}
with mock.patch('swift.obj.reconstructor.os.path.getmtime',
return_value=11):
self.assertFalse(reconstructor.is_healthy())
reconstructor.all_local_devices = {
'sdb%d' % p for p in reconstructor.policies}
with mock.patch('swift.obj.reconstructor.os.path.getmtime',
return_value=12):
self.assertTrue(reconstructor.is_healthy())
def test_is_healthy_detects_ring_change(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'recon_cache_path': self.recon_cache_path,
'reconstructor_workers': 1,
# bind ip and port will not match any dev in first version of ring
'bind_ip': '10.0.0.20', 'bind_port': '1020'},
logger=self.logger)
p = random.choice(reconstructor.policies)
self.assertEqual(14, len(p.object_ring.devs)) # sanity check
worker_args = list(reconstructor.get_worker_args())
self.assertFalse(worker_args[0]['override_devices']) # no local devs
with mock.patch('swift.obj.reconstructor.os.path.getmtime',
return_value=10):
self.assertTrue(reconstructor.is_healthy())
# expand ring - now there are local devices
p.object_ring.set_replicas(28)
self.assertEqual(28, len(p.object_ring.devs)) # sanity check
# If ring.gz mtime did not change, there is no change to detect
with mock.patch('swift.obj.reconstructor.os.path.getmtime',
return_value=10):
self.assertTrue(reconstructor.is_healthy())
# Now, ring.gz mtime changed, so the change will be detected
with mock.patch('swift.obj.reconstructor.os.path.getmtime',
return_value=11):
self.assertFalse(reconstructor.is_healthy())
self.assertNotEqual(worker_args, list(reconstructor.get_worker_args()))
with mock.patch('swift.obj.reconstructor.os.path.getmtime',
return_value=12):
self.assertTrue(reconstructor.is_healthy())
def test_final_recon_dump(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'recon_cache_path': self.recon_cache_path},
logger=self.logger)
reconstructor.all_local_devices = ['sda', 'sdc']
total = 12.0
now = time.time()
with mock.patch('swift.obj.reconstructor.time.time', return_value=now):
reconstructor.final_recon_dump(total)
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': now,
'object_reconstruction_time': total,
}, data)
total = 14.0
now += total * 60
with mock.patch('swift.obj.reconstructor.time.time', return_value=now):
reconstructor.final_recon_dump(total)
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': now,
'object_reconstruction_time': total,
}, data)
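        # helper: dump stats for the given override_devices and check that
        # the aggregate keys keep their previous values while each local
        # device gets its own per-disk entry stamped with the worker pid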
def check_per_disk_stats(before, now, old_total, total,
override_devices):
with mock.patch('swift.obj.reconstructor.time.time',
return_value=now), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value='pid-1'):
reconstructor.final_recon_dump(
total, override_devices=override_devices)
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': before,
'object_reconstruction_time': old_total,
'object_reconstruction_per_disk': {
'sda': {
'object_reconstruction_last': now,
'object_reconstruction_time': total,
'pid': 'pid-1',
},
'sdc': {
'object_reconstruction_last': now,
'object_reconstruction_time': total,
'pid': 'pid-1',
},
},
}, data)
# per_disk_stats with workers and local_devices
reconstructor.reconstructor_workers = 1
old_total = total
total = 16.0
before = now
now += total * 60
check_per_disk_stats(before, now, old_total, total, ['sda', 'sdc'])
# per_disk_stats with workers and local_devices but no overrides
reconstructor.reconstructor_workers = 1
total = 17.0
now += total * 60
check_per_disk_stats(before, now, old_total, total, [])
# and without workers we clear it out
reconstructor.reconstructor_workers = 0
total = 18.0
now += total * 60
with mock.patch('swift.obj.reconstructor.time.time', return_value=now):
reconstructor.final_recon_dump(total)
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': now,
'object_reconstruction_time': total,
}, data)
# set per disk stats again...
reconstructor.reconstructor_workers = 1
old_total = total
total = 18.0
before = now
now += total * 60
check_per_disk_stats(before, now, old_total, total, ['sda', 'sdc'])
# ...then remove all devices and check we clear out per-disk stats
reconstructor.all_local_devices = []
total = 20.0
now += total * 60
with mock.patch('swift.obj.reconstructor.time.time', return_value=now):
reconstructor.final_recon_dump(total)
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': now,
'object_reconstruction_time': total,
}, data)
def test_dump_recon_run_once_inline(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'recon_cache_path': self.recon_cache_path},
logger=self.logger)
reconstructor.reconstruct = mock.MagicMock()
now = time.time()
later = now + 300 # 5 mins
with mock.patch('swift.obj.reconstructor.time.time', side_effect=[
now, later, later]):
reconstructor.run_once()
# no override args passed to reconstruct
self.assertEqual([mock.call(
override_devices=[],
override_partitions=[]
)], reconstructor.reconstruct.call_args_list)
# script mode with no override args, we expect recon dumps
self.assertTrue(os.path.exists(self.rcache))
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
}, data)
total = 10.0
later += total * 60
with mock.patch('swift.obj.reconstructor.time.time',
return_value=later):
reconstructor.final_recon_dump(total)
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': later,
'object_reconstruction_time': 10.0,
}, data)
def test_dump_recon_run_once_in_worker(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'recon_cache_path': self.recon_cache_path,
'reconstructor_workers': 1},
logger=self.logger)
reconstructor.get_local_devices = lambda: {'sda'}
now = time.time()
later = now + 300 # 5 mins
def do_test(run_kwargs, expected_device):
# get the actual kwargs that would be passed to run_once in a
# worker
run_once_kwargs = list(
reconstructor.get_worker_args(once=True, **run_kwargs))[0]
reconstructor.reconstruct = mock.MagicMock()
with mock.patch('swift.obj.reconstructor.time.time',
side_effect=[now, later, later]):
reconstructor.run_once(**run_once_kwargs)
self.assertEqual([mock.call(
override_devices=[expected_device],
override_partitions=[]
)], reconstructor.reconstruct.call_args_list)
self.assertTrue(os.path.exists(self.rcache))
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
# no aggregate is written but perhaps it should be, in which
# case this assertion will need to change
'object_reconstruction_per_disk': {
expected_device: {
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
'pid': mock.ANY
}
}
}, data)
# script mode with no CLI override args, we expect recon dumps
do_test({}, 'sda')
# script mode *with* CLI override devices, we expect recon dumps
os.unlink(self.rcache)
do_test(dict(devices='sda'), 'sda')
# if the override device is not in local devices we still get
# a recon dump, but it'll get cleaned up in the next aggregation
os.unlink(self.rcache)
do_test(dict(devices='sdz'), 'sdz')
# repeat with no local devices
reconstructor.get_local_devices = lambda: set()
os.unlink(self.rcache)
do_test(dict(devices='sdz'), 'sdz')
# now disable workers and check that inline run_once updates rcache
# and clears out per disk stats
reconstructor.get_local_devices = lambda: {'sda'}
now = time.time()
later = now + 600 # 10 mins
reconstructor.reconstructor_workers = 0
with mock.patch('swift.obj.reconstructor.time.time',
side_effect=[now, later, later]):
reconstructor.run_once()
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': later,
'object_reconstruction_time': 10.0,
}, data)
def test_no_dump_recon_run_once(self):
reconstructor = object_reconstructor.ObjectReconstructor(
{'recon_cache_path': self.recon_cache_path},
logger=self.logger)
reconstructor.get_local_devices = lambda: {'sda', 'sdb', 'sdc'}
def do_test(run_once_kwargs, expected_devices, expected_partitions):
reconstructor.reconstruct = mock.MagicMock()
now = time.time()
later = now + 300 # 5 mins
with mock.patch('swift.obj.reconstructor.time.time', side_effect=[
now, later, later]):
reconstructor.run_once(**run_once_kwargs)
# override args passed to reconstruct
actual_calls = reconstructor.reconstruct.call_args_list
self.assertEqual({'override_devices', 'override_partitions'},
set(actual_calls[0][1]))
self.assertEqual(sorted(expected_devices),
sorted(actual_calls[0][1]['override_devices']))
self.assertEqual(sorted(expected_partitions),
sorted(actual_calls[0][1]['override_partitions']))
self.assertFalse(actual_calls[1:])
self.assertEqual(False, os.path.exists(self.rcache))
# inline mode with overrides never does recon dump
reconstructor.reconstructor_workers = 0
kwargs = {'devices': 'sda,sdb'}
do_test(kwargs, ['sda', 'sdb'], [])
# Have partition override, so no recon dump
kwargs = {'partitions': '1,2,3'}
do_test(kwargs, [], [1, 2, 3])
reconstructor.reconstructor_workers = 1
worker_kwargs = list(
reconstructor.get_worker_args(once=True, **kwargs))[0]
do_test(worker_kwargs, ['sda', 'sdb', 'sdc'], [1, 2, 3])
reconstructor.reconstructor_workers = 0
kwargs = {'devices': 'sda,sdb', 'partitions': '1,2,3'}
do_test(kwargs, ['sda', 'sdb'], [1, 2, 3])
reconstructor.reconstructor_workers = 1
worker_kwargs = list(
reconstructor.get_worker_args(once=True, **kwargs))[0]
do_test(worker_kwargs, ['sda', 'sdb'], [1, 2, 3])
# 'sdz' is not in local devices
reconstructor.reconstructor_workers = 0
kwargs = {'devices': 'sdz'}
do_test(kwargs, ['sdz'], [])
def test_run_forever_recon_aggregation(self):
class StopForever(Exception):
pass
reconstructor = object_reconstructor.ObjectReconstructor({
'reconstructor_workers': 2,
'recon_cache_path': self.recon_cache_path
}, logger=self.logger)
reconstructor.get_local_devices = lambda: ['sda', 'sdb', 'sdc', 'sdd']
reconstructor.reconstruct = mock.MagicMock()
now = time.time()
later = now + 300 # 5 mins
worker_args = list(
# include 'devices' kwarg as a sanity check - it should be ignored
# in run_forever mode
reconstructor.get_worker_args(once=False, devices='sda'))
with mock.patch('swift.obj.reconstructor.time.time',
side_effect=[now, later, later]), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value='pid-1'), \
mock.patch('swift.obj.reconstructor.sleep',
side_effect=[StopForever]), \
Timeout(.3), quiet_eventlet_exceptions(), \
self.assertRaises(StopForever):
gt = spawn(reconstructor.run_forever, **worker_args[0])
gt.wait()
# override args are passed to reconstruct
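        # (with 2 workers and 4 local devices, this worker presumably gets
        # every other device: 'sda' and 'sdc')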
self.assertEqual([mock.call(
override_devices=['sda', 'sdc'],
override_partitions=[]
)], reconstructor.reconstruct.call_args_list)
# forever mode with override args, we expect per-disk recon dumps
self.assertTrue(os.path.exists(self.rcache))
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_per_disk': {
'sda': {
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
'pid': 'pid-1',
},
'sdc': {
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
'pid': 'pid-1',
},
}
}, data)
reconstructor.reconstruct.reset_mock()
# another worker would get *different* disks
before = now = later
later = now + 300 # 5 more minutes
with mock.patch('swift.obj.reconstructor.time.time',
side_effect=[now, later, later]), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value='pid-2'), \
mock.patch('swift.obj.reconstructor.sleep',
side_effect=[StopForever]), \
Timeout(.3), quiet_eventlet_exceptions(), \
self.assertRaises(StopForever):
gt = spawn(reconstructor.run_forever, **worker_args[1])
gt.wait()
        # override args are passed to reconstruct
self.assertEqual([mock.call(
override_devices=['sdb', 'sdd'],
override_partitions=[]
)], reconstructor.reconstruct.call_args_list)
# forever mode with override args, we expect per-disk recon dumps
self.assertTrue(os.path.exists(self.rcache))
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_per_disk': {
'sda': {
'object_reconstruction_last': before,
'object_reconstruction_time': 5.0,
'pid': 'pid-1',
},
'sdb': {
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
'pid': 'pid-2',
},
'sdc': {
'object_reconstruction_last': before,
'object_reconstruction_time': 5.0,
'pid': 'pid-1',
},
'sdd': {
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
'pid': 'pid-2',
},
}
}, data)
# aggregation is done in the parent thread even later
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': later,
'object_reconstruction_time': 10.0,
'object_reconstruction_per_disk': {
'sda': {
'object_reconstruction_last': before,
'object_reconstruction_time': 5.0,
'pid': 'pid-1',
},
'sdb': {
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
'pid': 'pid-2',
},
'sdc': {
'object_reconstruction_last': before,
'object_reconstruction_time': 5.0,
'pid': 'pid-1',
},
'sdd': {
'object_reconstruction_last': later,
'object_reconstruction_time': 5.0,
'pid': 'pid-2',
},
}
}, data)
def test_run_forever_recon_no_devices(self):
class StopForever(Exception):
pass
reconstructor = object_reconstructor.ObjectReconstructor({
'reconstructor_workers': 2,
'recon_cache_path': self.recon_cache_path
}, logger=self.logger)
def run_forever_but_stop(pid, mock_times, worker_kwargs):
with mock.patch('swift.obj.reconstructor.time.time',
side_effect=mock_times), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value=pid), \
mock.patch('swift.obj.reconstructor.sleep',
side_effect=[StopForever]), \
Timeout(.3), quiet_eventlet_exceptions(), \
self.assertRaises(StopForever):
gt = spawn(reconstructor.run_forever, **worker_kwargs)
gt.wait()
reconstructor.reconstruct = mock.MagicMock()
now = time.time()
# first run_forever with no devices
reconstructor.get_local_devices = lambda: []
later = now + 6 # 6 sec
worker_args = list(
# include 'devices' kwarg as a sanity check - it should be ignored
# in run_forever mode
reconstructor.get_worker_args(once=False, devices='sda'))
run_forever_but_stop('pid-1', [now, later, later], worker_args[0])
# override args are passed to reconstruct
self.assertEqual([mock.call(
override_devices=[],
override_partitions=[]
)], reconstructor.reconstruct.call_args_list)
# forever mode with no args, we expect total recon dumps
self.assertTrue(os.path.exists(self.rcache))
with open(self.rcache) as f:
data = json.load(f)
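        # the 6 second elapsed time is recorded as 0.1 minutes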
expected = {
'object_reconstruction_last': later,
'object_reconstruction_time': 0.1,
}
self.assertEqual(expected, data)
reconstructor.reconstruct.reset_mock()
# aggregation is done in the parent thread even later
now = later + 300
with mock.patch('swift.obj.reconstructor.time.time',
side_effect=[now]):
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual(expected, data)
def test_recon_aggregation_waits_for_all_devices(self):
reconstructor = object_reconstructor.ObjectReconstructor({
'reconstructor_workers': 2,
'recon_cache_path': self.recon_cache_path
}, logger=self.logger)
reconstructor.all_local_devices = set([
'd0', 'd1', 'd2', 'd3',
# unreported device definitely matters
'd4'])
start = time.time() - 1000
for i in range(4):
with mock.patch('swift.obj.reconstructor.time.time',
return_value=start + (300 * i)), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value='pid-%s' % i):
reconstructor.final_recon_dump(
i, override_devices=['d%s' % i])
# sanity
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_per_disk': {
'd0': {
'object_reconstruction_last': start,
'object_reconstruction_time': 0.0,
'pid': 'pid-0',
},
'd1': {
'object_reconstruction_last': start + 300,
'object_reconstruction_time': 1,
'pid': 'pid-1',
},
'd2': {
'object_reconstruction_last': start + 600,
'object_reconstruction_time': 2,
'pid': 'pid-2',
},
'd3': {
'object_reconstruction_last': start + 900,
'object_reconstruction_time': 3,
'pid': 'pid-3',
},
}
}, data)
# unreported device d4 prevents aggregation
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertNotIn('object_reconstruction_last', data)
self.assertNotIn('object_reconstruction_time', data)
self.assertEqual(set(['d0', 'd1', 'd2', 'd3']),
set(data['object_reconstruction_per_disk'].keys()))
# it's idempotent
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertNotIn('object_reconstruction_last', data)
self.assertNotIn('object_reconstruction_time', data)
self.assertEqual(set(['d0', 'd1', 'd2', 'd3']),
set(data['object_reconstruction_per_disk'].keys()))
# remove d4, we no longer wait on it for aggregation
reconstructor.all_local_devices = set(['d0', 'd1', 'd2', 'd3'])
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
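        # the aggregate presumably spans from the earliest per-disk start
        # (start) to the latest finish (start + 900), i.e. 15 minutes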
self.assertEqual(start + 900, data['object_reconstruction_last'])
self.assertEqual(15, data['object_reconstruction_time'])
self.assertEqual(set(['d0', 'd1', 'd2', 'd3']),
set(data['object_reconstruction_per_disk'].keys()))
def test_recon_aggregation_removes_devices(self):
reconstructor = object_reconstructor.ObjectReconstructor({
'reconstructor_workers': 2,
'recon_cache_path': self.recon_cache_path
}, logger=self.logger)
reconstructor.all_local_devices = set(['d0', 'd1', 'd2', 'd3'])
start = time.time() - 1000
for i in range(4):
with mock.patch('swift.obj.reconstructor.time.time',
return_value=start + (300 * i)), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value='pid-%s' % i):
reconstructor.final_recon_dump(
i, override_devices=['d%s' % i])
# sanity
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_per_disk': {
'd0': {
'object_reconstruction_last': start,
'object_reconstruction_time': 0.0,
'pid': 'pid-0',
},
'd1': {
'object_reconstruction_last': start + 300,
'object_reconstruction_time': 1,
'pid': 'pid-1',
},
'd2': {
'object_reconstruction_last': start + 600,
'object_reconstruction_time': 2,
'pid': 'pid-2',
},
'd3': {
'object_reconstruction_last': start + 900,
'object_reconstruction_time': 3,
'pid': 'pid-3',
},
}
}, data)
reconstructor.all_local_devices = set(['d0', 'd1', 'd2', 'd3'])
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual(start + 900, data['object_reconstruction_last'])
self.assertEqual(15, data['object_reconstruction_time'])
self.assertEqual(set(['d0', 'd1', 'd2', 'd3']),
set(data['object_reconstruction_per_disk'].keys()))
# it's idempotent
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': start + 900,
'object_reconstruction_time': 15,
'object_reconstruction_per_disk': {
'd0': {
'object_reconstruction_last': start,
'object_reconstruction_time': 0.0,
'pid': 'pid-0',
},
'd1': {
'object_reconstruction_last': start + 300,
'object_reconstruction_time': 1,
'pid': 'pid-1',
},
'd2': {
'object_reconstruction_last': start + 600,
'object_reconstruction_time': 2,
'pid': 'pid-2',
},
'd3': {
'object_reconstruction_last': start + 900,
'object_reconstruction_time': 3,
'pid': 'pid-3',
},
}
}, data)
# if a device is removed from the ring
reconstructor.all_local_devices = set(['d1', 'd2', 'd3'])
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
        # ... its per-disk stats are removed (d0)
self.assertEqual({
'object_reconstruction_last': start + 900,
'object_reconstruction_time': 11,
'object_reconstruction_per_disk': {
'd1': {
'object_reconstruction_last': start + 300,
'object_reconstruction_time': 1,
'pid': 'pid-1',
},
'd2': {
'object_reconstruction_last': start + 600,
'object_reconstruction_time': 2,
'pid': 'pid-2',
},
'd3': {
'object_reconstruction_last': start + 900,
'object_reconstruction_time': 3,
'pid': 'pid-3',
},
}
}, data)
# which can affect the aggregates!
reconstructor.all_local_devices = set(['d1', 'd2'])
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': start + 600,
'object_reconstruction_time': 6,
'object_reconstruction_per_disk': {
'd1': {
'object_reconstruction_last': start + 300,
'object_reconstruction_time': 1,
'pid': 'pid-1',
},
'd2': {
'object_reconstruction_last': start + 600,
'object_reconstruction_time': 2,
'pid': 'pid-2',
},
}
}, data)
def test_recon_aggregation_at_end_of_run_once(self):
reconstructor = object_reconstructor.ObjectReconstructor({
'reconstructor_workers': 2,
'recon_cache_path': self.recon_cache_path
}, logger=self.logger)
reconstructor.all_local_devices = set(['d0', 'd1', 'd2', 'd3'])
start = time.time() - 1000
for i in range(4):
with mock.patch('swift.obj.reconstructor.time.time',
return_value=start + (300 * i)), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value='pid-%s' % i):
reconstructor.final_recon_dump(
i, override_devices=['d%s' % i])
# sanity
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_per_disk': {
'd0': {
'object_reconstruction_last': start,
'object_reconstruction_time': 0.0,
'pid': 'pid-0',
},
'd1': {
'object_reconstruction_last': start + 300,
'object_reconstruction_time': 1,
'pid': 'pid-1',
},
'd2': {
'object_reconstruction_last': start + 600,
'object_reconstruction_time': 2,
'pid': 'pid-2',
},
'd3': {
'object_reconstruction_last': start + 900,
'object_reconstruction_time': 3,
'pid': 'pid-3',
},
}
}, data)
reconstructor.post_multiprocess_run()
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual(start + 900, data['object_reconstruction_last'])
self.assertEqual(15, data['object_reconstruction_time'])
def test_recon_aggregation_races_with_final_recon_dump(self):
reconstructor = object_reconstructor.ObjectReconstructor({
'reconstructor_workers': 2,
'recon_cache_path': self.recon_cache_path
}, logger=self.logger)
reconstructor.all_local_devices = set(['d0', 'd1'])
start = time.time() - 1000
# first worker dumps to recon cache
with mock.patch('swift.obj.reconstructor.time.time',
return_value=start), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value='pid-0'):
reconstructor.final_recon_dump(
1, override_devices=['d0'])
# sanity
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_per_disk': {
'd0': {
'object_reconstruction_last': start,
'object_reconstruction_time': 1,
'pid': 'pid-0',
},
}
}, data)
# simulate a second worker concurrently dumping to recon cache while
# parent is aggregating existing results; mock dump_recon_cache as a
# convenient way to interrupt parent aggregate_recon_update and 'pass
# control' to second worker
updated_data = [] # state of recon cache just after second worker dump
def simulate_other_process_final_recon_dump():
with mock.patch('swift.obj.reconstructor.time.time',
return_value=start + 999), \
mock.patch('swift.obj.reconstructor.os.getpid',
return_value='pid-1'):
reconstructor.final_recon_dump(
1000, override_devices=['d1'])
with open(self.rcache) as f:
updated_data.append(json.load(f))
def fake_dump_recon_cache(*args, **kwargs):
# temporarily put back real dump_recon_cache
with mock.patch('swift.obj.reconstructor.dump_recon_cache',
dump_recon_cache):
simulate_other_process_final_recon_dump()
# and now proceed with parent dump_recon_cache
dump_recon_cache(*args, **kwargs)
reconstructor.dump_recon_cache = fake_dump_recon_cache
with mock.patch('swift.obj.reconstructor.dump_recon_cache',
fake_dump_recon_cache):
reconstructor.aggregate_recon_update()
self.assertEqual([{ # sanity check - second process did dump its data
'object_reconstruction_per_disk': {
'd0': {
'object_reconstruction_last': start,
'object_reconstruction_time': 1,
'pid': 'pid-0',
},
'd1': {
'object_reconstruction_last': start + 999,
'object_reconstruction_time': 1000,
'pid': 'pid-1',
},
}
}], updated_data)
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_per_disk': {
'd0': {
'object_reconstruction_last': start,
'object_reconstruction_time': 1,
'pid': 'pid-0',
},
'd1': {
'object_reconstruction_last': start + 999,
'object_reconstruction_time': 1000,
'pid': 'pid-1',
},
}
}, data)
# next aggregation will find d1 stats
reconstructor.aggregate_recon_update()
with open(self.rcache) as f:
data = json.load(f)
self.assertEqual({
'object_reconstruction_last': start + 999,
'object_reconstruction_time': 1000,
'object_reconstruction_per_disk': {
'd0': {
'object_reconstruction_last': start,
'object_reconstruction_time': 1,
'pid': 'pid-0',
},
'd1': {
'object_reconstruction_last': start + 999,
'object_reconstruction_time': 1000,
'pid': 'pid-1',
},
}
}, data)
def test_worker_logging(self):
reconstructor = object_reconstructor.ObjectReconstructor({
'reconstructor_workers': 4,
'recon_cache_path': self.recon_cache_path
}, logger=self.logger)
def log_some_stuff(*a, **kw):
reconstructor.logger.debug("debug message")
reconstructor.logger.info("info message")
reconstructor.logger.warning("warning message")
reconstructor.logger.error("error message")
with mock.patch.object(reconstructor, 'reconstruct',
log_some_stuff), \
mock.patch("os.getpid", lambda: 20641):
reconstructor.get_worker_args()
reconstructor.run_once(multiprocess_worker_index=1,
override_devices=['sda', 'sdb'])
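        # worker index 1 is reported 1-based, hence 'worker 2/4'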
prefix = "[worker 2/4 pid=20641] "
for level, lines in self.logger.logger.all_log_lines().items():
for line in lines:
self.assertTrue(
line.startswith(prefix),
"%r doesn't start with %r (level %s)" % (
line, prefix, level))
@patch_policies(with_ec_default=True)
class BaseTestObjectReconstructor(unittest.TestCase):
def setUp(self):
skip_if_no_xattrs()
self.policy = POLICIES.default
self.policy.object_ring._rtime = time.time() + 3600
self.testdir = tempfile.mkdtemp()
self.devices = os.path.join(self.testdir, 'devices')
self.local_dev = self.policy.object_ring.devs[0]
self.ip = self.local_dev['replication_ip']
self.port = self.local_dev['replication_port']
self.conf = {
'devices': self.devices,
'mount_check': False,
'bind_ip': self.ip,
'bind_port': self.port,
}
self.logger = debug_logger('object-reconstructor')
self._configure_reconstructor()
self.policy.object_ring.max_more_nodes = \
self.policy.object_ring.replicas
self.ts_iter = make_timestamp_iter()
self.fabricated_ring = FabricatedRing(replicas=14, devices=28)
def _configure_reconstructor(self, **kwargs):
self.conf.update(kwargs)
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.reconstructor._reset_stats()
# some tests bypass build_reconstruction_jobs and go to process_job
# directly, so you end up with a /0 when you try to show the
# percentage of complete jobs as ratio of the total job count
self.reconstructor.job_count = 1
# if we ever let a test through without properly patching the
# REPLICATE and SSYNC calls - let's fail sort fast-ish
self.reconstructor.lockup_timeout = 3
def tearDown(self):
self.reconstructor._reset_stats()
self.reconstructor.stats_line()
shutil.rmtree(self.testdir)
def ts(self):
return next(self.ts_iter)
class TestObjectReconstructor(BaseTestObjectReconstructor):
def test_ring_ip_and_bind_ip(self):
# make clean base_conf
base_conf = dict(self.conf)
for key in ('bind_ip', 'ring_ip'):
base_conf.pop(key, None)
# default ring_ip is always 0.0.0.0
self.conf = base_conf
self._configure_reconstructor()
self.assertEqual('0.0.0.0', self.reconstructor.ring_ip)
# bind_ip works fine for legacy configs
self.conf = dict(base_conf)
self.conf['bind_ip'] = '192.168.1.42'
self._configure_reconstructor()
self.assertEqual('192.168.1.42', self.reconstructor.ring_ip)
# ring_ip works fine by-itself
self.conf = dict(base_conf)
self.conf['ring_ip'] = '192.168.1.43'
self._configure_reconstructor()
self.assertEqual('192.168.1.43', self.reconstructor.ring_ip)
# if you have both ring_ip wins
self.conf = dict(base_conf)
self.conf['bind_ip'] = '192.168.1.44'
self.conf['ring_ip'] = '192.168.1.45'
self._configure_reconstructor()
self.assertEqual('192.168.1.45', self.reconstructor.ring_ip)
def test_handoffs_only_default(self):
# sanity neither option added to default conf
self.conf.pop('handoffs_first', None)
self.conf.pop('handoffs_only', None)
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertFalse(self.reconstructor.handoffs_only)
def test_handoffs_first_enables_handoffs_only(self):
self.conf['handoffs_first'] = "True"
self.conf.pop('handoffs_only', None) # sanity
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertTrue(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
expected = [
'The handoffs_first option is deprecated in favor '
'of handoffs_only. This option may be ignored in a '
'future release.',
'Handoff only mode is not intended for normal operation, '
'use handoffs_only with care.',
]
self.assertEqual(expected, warnings)
def test_handoffs_only_ignores_handoffs_first(self):
self.conf['handoffs_first'] = "True"
self.conf['handoffs_only'] = "False"
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertFalse(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
expected = [
'The handoffs_first option is deprecated in favor of '
'handoffs_only. This option may be ignored in a future release.',
'Ignored handoffs_first option in favor of handoffs_only.',
]
self.assertEqual(expected, warnings)
def test_handoffs_only_enabled(self):
self.conf.pop('handoffs_first', None) # sanity
self.conf['handoffs_only'] = "True"
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertTrue(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
expected = [
'Handoff only mode is not intended for normal operation, '
'use handoffs_only with care.',
]
self.assertEqual(expected, warnings)
def test_handoffs_only_true_and_first_true(self):
self.conf['handoffs_first'] = "True"
self.conf['handoffs_only'] = "True"
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertTrue(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
expected = [
'The handoffs_first option is deprecated in favor of '
'handoffs_only. This option may be ignored in a future release.',
'Handoff only mode is not intended for normal operation, '
'use handoffs_only with care.',
]
self.assertEqual(expected, warnings)
def test_handoffs_only_false_and_first_false(self):
self.conf['handoffs_only'] = "False"
self.conf['handoffs_first'] = "False"
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertFalse(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
expected = [
'The handoffs_first option is deprecated in favor of '
'handoffs_only. This option may be ignored in a future release.',
]
self.assertEqual(expected, warnings)
def test_handoffs_only_none_and_first_false(self):
self.conf['handoffs_first'] = "False"
self.conf.pop('handoffs_only', None) # sanity
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertFalse(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
expected = [
'The handoffs_first option is deprecated in favor of '
'handoffs_only. This option may be ignored in a future release.',
]
self.assertEqual(expected, warnings)
def test_handoffs_only_false_and_first_none(self):
self.conf.pop('handoffs_first', None) # sanity
self.conf['handoffs_only'] = "False"
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertFalse(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
self.assertFalse(warnings)
def test_handoffs_only_true_and_first_false(self):
self.conf['handoffs_first'] = "False"
self.conf['handoffs_only'] = "True"
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.assertTrue(self.reconstructor.handoffs_only)
warnings = self.logger.get_lines_for_level('warning')
expected = [
'The handoffs_first option is deprecated in favor of '
'handoffs_only. This option may be ignored in a future release.',
'Handoff only mode is not intended for normal operation, '
'use handoffs_only with care.',
]
self.assertEqual(expected, warnings)
def test_two_ec_policies(self):
with patch_policies([
StoragePolicy(0, name='zero', is_deprecated=True),
ECStoragePolicy(1, name='one', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=3),
ECStoragePolicy(2, name='two',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2)],
fake_ring_args=[
{}, {'replicas': 7}, {'replicas': 10}]):
self._configure_reconstructor()
jobs = []
def process_job(job):
jobs.append(job)
self.reconstructor.process_job = process_job
os.makedirs(os.path.join(self.devices, 'sda', 'objects-1', '0'))
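            # only the policy-1 datadir has a partition on disk, so a single
            # job is expected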
self.reconstructor.run_once()
self.assertEqual(1, len(jobs))
def test_collect_parts_skips_non_ec_policy_and_device(self):
stub_parts = (371, 78, 419, 834)
for policy in POLICIES:
datadir = diskfile.get_data_dir(policy)
for part in stub_parts:
utils.mkdirs(os.path.join(
self.devices, self.local_dev['device'],
datadir, str(part)))
part_infos = list(self.reconstructor.collect_parts())
found_parts = sorted(int(p['partition']) for p in part_infos)
self.assertEqual(found_parts, sorted(stub_parts))
for part_info in part_infos:
self.assertEqual(part_info['local_dev'], self.local_dev)
self.assertEqual(part_info['policy'], self.policy)
self.assertEqual(part_info['part_path'],
os.path.join(self.devices,
self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(part_info['partition'])))
def test_collect_parts_skips_non_local_devs_servers_per_port(self):
self._configure_reconstructor(devices=self.devices, mount_check=False,
bind_ip=self.ip, bind_port=self.port,
servers_per_port=2)
device_parts = {
'sda': (374,),
'sdb': (179, 807), # w/one-serv-per-port, same IP alone is local
'sdc': (363, 468, 843),
'sdd': (912,), # "not local" via different IP
}
for policy in POLICIES:
datadir = diskfile.get_data_dir(policy)
for dev, parts in device_parts.items():
for part in parts:
utils.mkdirs(os.path.join(
self.devices, dev,
datadir, str(part)))
# we're only going to add sda and sdc into the ring
local_devs = ('sda', 'sdb', 'sdc')
stub_ring_devs = [{
'id': i,
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port + 1 if dev == 'sdb' else self.port,
} for i, dev in enumerate(local_devs)]
stub_ring_devs.append({
'id': len(local_devs),
'device': 'sdd',
'replication_ip': '127.0.0.88', # not local via IP
'replication_port': self.port,
})
self.reconstructor.ring_ip = '0.0.0.0' # use whataremyips
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs):
part_infos = list(self.reconstructor.collect_parts())
found_parts = sorted(int(p['partition']) for p in part_infos)
expected_parts = sorted(itertools.chain(
*(device_parts[d] for d in local_devs)))
self.assertEqual(found_parts, expected_parts)
for part_info in part_infos:
self.assertEqual(part_info['policy'], self.policy)
self.assertIn(part_info['local_dev'], stub_ring_devs)
dev = part_info['local_dev']
self.assertEqual(part_info['part_path'],
os.path.join(self.devices,
dev['device'],
diskfile.get_data_dir(self.policy),
str(part_info['partition'])))
    def test_collect_parts_multi_device_skips_non_local_devs(self):
device_parts = {
'sda': (374,),
'sdb': (179, 807), # "not local" via different port
'sdc': (363, 468, 843),
'sdd': (912,), # "not local" via different IP
}
for policy in POLICIES:
datadir = diskfile.get_data_dir(policy)
for dev, parts in device_parts.items():
for part in parts:
utils.mkdirs(os.path.join(
self.devices, dev,
datadir, str(part)))
# we're only going to add sda and sdc into the ring
local_devs = ('sda', 'sdc')
stub_ring_devs = [{
'id': i,
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port,
} for i, dev in enumerate(local_devs)]
stub_ring_devs.append({
'id': len(local_devs),
'device': 'sdb',
'replication_ip': self.ip,
'replication_port': self.port + 1, # not local via port
})
stub_ring_devs.append({
'id': len(local_devs) + 1,
'device': 'sdd',
'replication_ip': '127.0.0.88', # not local via IP
'replication_port': self.port,
})
self.reconstructor.ring_ip = '0.0.0.0' # use whataremyips
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs):
part_infos = list(self.reconstructor.collect_parts())
found_parts = sorted(int(p['partition']) for p in part_infos)
expected_parts = sorted(itertools.chain(
*(device_parts[d] for d in local_devs)))
self.assertEqual(found_parts, expected_parts)
for part_info in part_infos:
self.assertEqual(part_info['policy'], self.policy)
self.assertIn(part_info['local_dev'], stub_ring_devs)
dev = part_info['local_dev']
self.assertEqual(part_info['part_path'],
os.path.join(self.devices,
dev['device'],
diskfile.get_data_dir(self.policy),
str(part_info['partition'])))
def test_collect_parts_multi_device_skips_non_ring_devices(self):
device_parts = {
'sda': (374,),
'sdc': (363, 468, 843),
}
for policy in POLICIES:
datadir = diskfile.get_data_dir(policy)
for dev, parts in device_parts.items():
for part in parts:
utils.mkdirs(os.path.join(
self.devices, dev,
datadir, str(part)))
# we're only going to add sda and sdc into the ring
local_devs = ('sda', 'sdc')
stub_ring_devs = [{
'id': i,
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port,
} for i, dev in enumerate(local_devs)]
self.reconstructor.ring_ip = '0.0.0.0' # use whataremyips
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs):
part_infos = list(self.reconstructor.collect_parts())
found_parts = sorted(int(p['partition']) for p in part_infos)
expected_parts = sorted(itertools.chain(
*(device_parts[d] for d in local_devs)))
self.assertEqual(found_parts, expected_parts)
for part_info in part_infos:
self.assertEqual(part_info['policy'], self.policy)
self.assertIn(part_info['local_dev'], stub_ring_devs)
dev = part_info['local_dev']
self.assertEqual(part_info['part_path'],
os.path.join(self.devices,
dev['device'],
diskfile.get_data_dir(self.policy),
str(part_info['partition'])))
def test_collect_parts_mount_check(self):
# each device has one part in it
local_devs = ('sda', 'sdb')
for i, dev in enumerate(local_devs):
datadir = diskfile.get_data_dir(self.policy)
utils.mkdirs(os.path.join(
self.devices, dev, datadir, str(i)))
stub_ring_devs = [{
'id': i,
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port
} for i, dev in enumerate(local_devs)]
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs):
part_infos = list(self.reconstructor.collect_parts())
self.assertEqual(2, len(part_infos)) # sanity
self.assertEqual(set(int(p['partition']) for p in part_infos),
set([0, 1]))
paths = []
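        # this fake stands in for swift.obj.diskfile.check_drive: it returns
        # the device path when the device is accepted and None otherwise,
        # recording accepted paths for the assertions below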
def fake_check_drive(devices, device, mount_check):
path = os.path.join(devices, device)
if (not mount_check) and os.path.isdir(path):
# while mount_check is false, the test still creates the dirs
paths.append(path)
return path
return None
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs), \
mock.patch('swift.obj.diskfile.check_drive',
fake_check_drive):
part_infos = list(self.reconstructor.collect_parts())
self.assertEqual(2, len(part_infos)) # sanity, same jobs
self.assertEqual(set(int(p['partition']) for p in part_infos),
set([0, 1]))
# ... because fake_check_drive returned paths for both dirs
self.assertEqual(set(paths), set([
os.path.join(self.devices, dev) for dev in local_devs]))
# ... now with mount check
self._configure_reconstructor(mount_check=True)
self.assertTrue(self.reconstructor.mount_check)
paths = []
for policy in POLICIES:
self.assertTrue(self.reconstructor._df_router[policy].mount_check)
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs), \
mock.patch('swift.obj.diskfile.check_drive',
fake_check_drive):
part_infos = list(self.reconstructor.collect_parts())
self.assertEqual([], part_infos) # sanity, no jobs
# ... because fake_check_drive returned False for both paths
self.assertFalse(paths)
def fake_check_drive(devices, device, mount_check):
self.assertTrue(mount_check)
if device == 'sda':
return os.path.join(devices, device)
else:
return False
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs), \
mock.patch('swift.obj.diskfile.check_drive',
fake_check_drive):
part_infos = list(self.reconstructor.collect_parts())
self.assertEqual(1, len(part_infos)) # only sda picked up (part 0)
self.assertEqual(part_infos[0]['partition'], 0)
def test_collect_parts_cleans_tmp(self):
local_devs = ('sda', 'sdc')
stub_ring_devs = [{
'id': i,
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port
} for i, dev in enumerate(local_devs)]
for device in local_devs:
utils.mkdirs(os.path.join(self.devices, device))
fake_unlink = mock.MagicMock()
self._configure_reconstructor(reclaim_age=1000)
now = time.time()
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch('swift.obj.reconstructor.time.time',
return_value=now), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs), \
mock.patch('swift.obj.reconstructor.unlink_older_than',
fake_unlink):
self.assertEqual([], list(self.reconstructor.collect_parts()))
        # each local device has unlink_older_than called on it,
# with now - self.reclaim_age
tmpdir = diskfile.get_tmp_dir(self.policy)
expected = now - 1000
self.assertEqual(fake_unlink.mock_calls, [
mock.call(os.path.join(self.devices, dev, tmpdir), expected)
for dev in local_devs])
def test_collect_parts_creates_datadir(self):
# create just the device path
dev_path = os.path.join(self.devices, self.local_dev['device'])
utils.mkdirs(dev_path)
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]):
self.assertEqual([], list(self.reconstructor.collect_parts()))
datadir_path = os.path.join(dev_path,
diskfile.get_data_dir(self.policy))
self.assertTrue(os.path.exists(datadir_path))
def test_collect_parts_creates_datadir_error(self):
# create just the device path
datadir_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy))
utils.mkdirs(os.path.dirname(datadir_path))
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch('swift.obj.reconstructor.mkdirs',
side_effect=OSError('kaboom!')):
self.assertEqual([], list(self.reconstructor.collect_parts()))
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1,
'Expected only one error, got %r' % error_lines)
line = error_lines[0]
self.assertIn('Unable to create', line)
self.assertIn(datadir_path, line)
def test_collect_parts_skips_invalid_paths(self):
datadir_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy))
utils.mkdirs(os.path.dirname(datadir_path))
with open(datadir_path, 'w') as f:
f.write('junk')
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]):
self.assertEqual([], list(self.reconstructor.collect_parts()))
self.assertTrue(os.path.exists(datadir_path))
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1,
'Expected only one error, got %r' % error_lines)
line = error_lines[0]
self.assertIn('Unable to list partitions', line)
self.assertIn(datadir_path, line)
def test_reconstruct_removes_non_partition_files(self):
# create some junk next to partitions
datadir_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy))
num_parts = 3
for part in range(num_parts):
utils.mkdirs(os.path.join(datadir_path, str(part)))
# Add some clearly non-partition dentries
utils.mkdirs(os.path.join(datadir_path, 'not/a/partition'))
for junk_name in ('junk', '1234'):
junk_file = os.path.join(datadir_path, junk_name)
with open(junk_file, 'w') as f:
f.write('junk')
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch('swift.obj.reconstructor.'
'ObjectReconstructor.process_job'):
self.reconstructor.reconstruct()
# all the bad gets cleaned up
errors = []
for junk_name in ('junk', '1234', 'not'):
junk_file = os.path.join(datadir_path, junk_name)
if os.path.exists(junk_file):
errors.append('%s still exists!' % junk_file)
self.assertFalse(errors)
error_lines = self.logger.get_lines_for_level('warning')
self.assertIn('Unexpected entity in data dir: %r'
% os.path.join(datadir_path, 'not'), error_lines)
self.assertIn('Unexpected entity in data dir: %r'
% os.path.join(datadir_path, 'junk'), error_lines)
self.assertIn('Unexpected entity %r is not a directory'
% os.path.join(datadir_path, '1234'), error_lines)
self.assertEqual(self.reconstructor.reconstruction_part_count, 6)
def test_collect_parts_overrides(self):
# setup multiple devices, with multiple parts
device_parts = {
'sda': (374, 843),
'sdb': (179, 807),
'sdc': (363, 468, 843),
}
datadir = diskfile.get_data_dir(self.policy)
for dev, parts in device_parts.items():
for part in parts:
utils.mkdirs(os.path.join(
self.devices, dev,
datadir, str(part)))
# we're only going to add sda and sdc into the ring
local_devs = ('sda', 'sdc')
stub_ring_devs = [{
'id': i,
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port
} for i, dev in enumerate(local_devs)]
expected = (
({}, [
('sda', 374),
('sda', 843),
('sdc', 363),
('sdc', 468),
('sdc', 843),
]),
({'override_devices': ['sda', 'sdc']}, [
('sda', 374),
('sda', 843),
('sdc', 363),
('sdc', 468),
('sdc', 843),
]),
({'override_devices': ['sdc']}, [
('sdc', 363),
('sdc', 468),
('sdc', 843),
]),
({'override_devices': ['sda']}, [
('sda', 374),
('sda', 843),
]),
({'override_devices': ['sdx']}, []),
({'override_partitions': [374]}, [
('sda', 374),
]),
({'override_partitions': [843]}, [
('sda', 843),
('sdc', 843),
]),
({'override_partitions': [843], 'override_devices': ['sda']}, [
('sda', 843),
]),
)
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs):
for kwargs, expected_parts in expected:
part_infos = list(self.reconstructor.collect_parts(**kwargs))
expected_paths = set(
os.path.join(self.devices, dev, datadir, str(part))
for dev, part in expected_parts)
found_paths = set(p['part_path'] for p in part_infos)
msg = 'expected %r != %r for %r' % (
expected_paths, found_paths, kwargs)
self.assertEqual(expected_paths, found_paths, msg)
def test_build_jobs_creates_empty_hashes(self):
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy), '0')
utils.mkdirs(part_path)
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': 0,
'part_path': part_path,
}
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(1, len(jobs))
job = jobs[0]
self.assertEqual(job['job_type'], object_reconstructor.SYNC)
self.assertEqual(job['frag_index'], 0)
self.assertEqual(job['suffixes'], [])
self.assertEqual(len(job['sync_to']), 3)
self.assertEqual(job['partition'], 0)
self.assertEqual(job['path'], part_path)
self.assertEqual(job['hashes'], {})
self.assertEqual(job['policy'], self.policy)
self.assertEqual(job['local_dev'], self.local_dev)
self.assertEqual(job['device'], self.local_dev['device'])
hashes_file = os.path.join(part_path,
diskfile.HASH_FILE)
self.assertTrue(os.path.exists(hashes_file))
suffixes = self.reconstructor._get_hashes(
self.local_dev['device'], 0, self.policy, do_listdir=True)
self.assertEqual(suffixes, {})
def test_build_jobs_no_hashes(self):
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy), '0')
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': 0,
'part_path': part_path,
}
stub_hashes = {}
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(1, len(jobs))
job = jobs[0]
self.assertEqual(job['job_type'], object_reconstructor.SYNC)
self.assertEqual(job['frag_index'], 0)
self.assertEqual(job['suffixes'], [])
self.assertEqual(len(job['sync_to']), 3)
self.assertEqual(job['partition'], 0)
self.assertEqual(job['path'], part_path)
self.assertEqual(job['hashes'], {})
self.assertEqual(job['policy'], self.policy)
self.assertEqual(job['local_dev'], self.local_dev)
self.assertEqual(job['device'], self.local_dev['device'])
def test_build_jobs_primary(self):
ring = self.policy.object_ring = self.fabricated_ring
# find a partition for which we're a primary
for partition in range(2 ** ring.part_power):
part_nodes = ring.get_part_nodes(partition)
try:
frag_index = [n['id'] for n in part_nodes].index(
self.local_dev['id'])
except ValueError:
pass
else:
break
else:
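            # for/else: only reached if no partition had this node as a
            # primary (i.e. the loop never hit 'break')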
self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': partition,
'part_path': part_path,
}
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(1, len(jobs))
job = jobs[0]
self.assertEqual(job['job_type'], object_reconstructor.SYNC)
self.assertEqual(job['frag_index'], frag_index)
self.assertEqual(job['suffixes'], list(stub_hashes.keys()))
self.assertEqual(set([n['index'] for n in job['sync_to']]),
set([(frag_index + 1) % ring.replicas,
(frag_index - 1) % ring.replicas,
(frag_index + int(0.5 * ring.replicas)),
]))
self.assertEqual(job['partition'], partition)
self.assertEqual(job['path'], part_path)
self.assertEqual(job['hashes'], stub_hashes)
self.assertEqual(job['policy'], self.policy)
self.assertEqual(job['local_dev'], self.local_dev)
self.assertEqual(job['device'], self.local_dev['device'])
def test_build_jobs_handoff(self):
ring = self.policy.object_ring = self.fabricated_ring
# find a partition for which we're a handoff
for partition in range(2 ** ring.part_power):
part_nodes = ring.get_part_nodes(partition)
if self.local_dev['id'] not in [n['id'] for n in part_nodes]:
break
else:
self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': partition,
'part_path': part_path,
}
# since this part doesn't belong on us it doesn't matter what
# frag_index we have
frag_index = random.randint(0, self.policy.ec_n_unique_fragments - 1)
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {None: 'hash'},
}
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(1, len(jobs), 'Expected only one job, got %r' % jobs)
job = jobs[0]
self.assertEqual(job['job_type'], object_reconstructor.REVERT)
self.assertEqual(job['frag_index'], frag_index)
self.assertEqual(sorted(job['suffixes']), sorted(stub_hashes.keys()))
self.assertEqual(
self.policy.ec_duplication_factor, len(job['sync_to']))
        # the sync_to nodes should be different from each other
node_ids = set([node['id'] for node in job['sync_to']])
self.assertEqual(len(node_ids),
self.policy.ec_duplication_factor)
        # but all the nodes have the same backend index to sync
node_indexes = set(
self.policy.get_backend_index(node['index'])
for node in job['sync_to'])
self.assertEqual(1, len(node_indexes))
self.assertEqual(job['sync_to'][0]['index'], frag_index)
self.assertEqual(job['path'], part_path)
self.assertEqual(job['partition'], partition)
self.assertEqual(sorted(job['hashes']), sorted(stub_hashes))
self.assertEqual(job['local_dev'], self.local_dev)
def test_build_jobs_mixed(self):
ring = self.policy.object_ring = self.fabricated_ring
# find a partition for which we're a primary
for partition in range(2 ** ring.part_power):
part_nodes = ring.get_part_nodes(partition)
try:
node_index = [n['id'] for n in part_nodes].index(
self.local_dev['id'])
except ValueError:
pass
else:
break
else:
self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': partition,
'part_path': part_path,
}
frag_index = self.policy.get_backend_index(node_index)
other_frag_index = random.choice(
[f for f in range(self.policy.ec_n_unique_fragments)
if f != node_index])
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'456': {other_frag_index: 'hash', None: 'hash'},
'abc': {None: 'hash'},
}
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(2, len(jobs))
sync_jobs, revert_jobs = [], []
for job in jobs:
self.assertEqual(job['partition'], partition)
self.assertEqual(job['path'], part_path)
self.assertEqual(sorted(job['hashes']), sorted(stub_hashes))
self.assertEqual(job['policy'], self.policy)
self.assertEqual(job['local_dev'], self.local_dev)
self.assertEqual(job['device'], self.local_dev['device'])
{
object_reconstructor.SYNC: sync_jobs,
object_reconstructor.REVERT: revert_jobs,
}[job['job_type']].append(job)
self.assertEqual(1, len(sync_jobs))
job = sync_jobs[0]
self.assertEqual(job['frag_index'], frag_index)
self.assertEqual(sorted(job['suffixes']), sorted(['123', 'abc']))
self.assertEqual(len(job['sync_to']), 3)
self.assertEqual(set([n['index'] for n in job['sync_to']]),
set([(frag_index + 1) % ring.replicas,
(frag_index - 1) % ring.replicas,
(frag_index + int(0.5 * ring.replicas)),
]))
self.assertEqual(1, len(revert_jobs))
job = revert_jobs[0]
self.assertEqual(job['frag_index'], other_frag_index)
self.assertEqual(job['suffixes'], ['456'])
self.assertEqual(len(job['sync_to']),
self.policy.ec_duplication_factor)
self.assertEqual(job['sync_to'][0]['index'], other_frag_index)
def test_build_jobs_revert_only_tombstones(self):
ring = self.policy.object_ring = self.fabricated_ring
# find a partition for which we're a handoff
for partition in range(2 ** ring.part_power):
part_nodes = ring.get_part_nodes(partition)
if self.local_dev['id'] not in [n['id'] for n in part_nodes]:
break
else:
self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': partition,
'part_path': part_path,
}
# we have no fragment index to hint the jobs where they belong
stub_hashes = {
'123': {None: 'hash'},
'abc': {None: 'hash'},
}
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(len(jobs), 1, 'Expected only one job, got %r' % jobs)
job = jobs[0]
expected = {
'job_type': object_reconstructor.REVERT,
'frag_index': None,
'suffixes': list(stub_hashes.keys()),
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
'device': self.local_dev['device'],
}
self.assertEqual(ring.replica_count, len(part_nodes))
expected_samples = (
(self.policy.ec_n_unique_fragments *
self.policy.ec_duplication_factor) -
self.policy.ec_ndata + 1)
self.assertEqual(len(job['sync_to']), expected_samples)
for k, v in expected.items():
msg = 'expected %s != %s for %s' % (
v, job[k], k)
self.assertEqual(v, job[k], msg)
def test_get_suffixes_to_sync(self):
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy), '1')
utils.mkdirs(part_path)
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': 1,
'part_path': part_path,
}
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(1, len(jobs))
job = jobs[0]
node = job['sync_to'][0]
# process_job used to try and modify the instance base headers
self.reconstructor.headers['X-Backend-Storage-Policy-Index'] = \
int(POLICIES[1])
# ... which doesn't work out under concurrency with multiple policies
self.assertNotEqual(
self.reconstructor.headers['X-Backend-Storage-Policy-Index'],
int(job['policy']))
with mocked_http_conn(200, body=pickle.dumps({})) as request_log:
suffixes, new_node = self.reconstructor._get_suffixes_to_sync(
job, node)
self.assertEqual([int(job['policy'])], [
r['headers']['X-Backend-Storage-Policy-Index']
for r in request_log.requests])
self.assertEqual(suffixes, [])
self.assertEqual(new_node, node)
def test_get_suffixes_in_sync(self):
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy), '1')
utils.mkdirs(part_path)
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': 1,
'part_path': part_path,
}
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(1, len(jobs))
job = jobs[0]
node = job['sync_to'][0]
local_hashes = {
'123': {job['frag_index']: 'hash', None: 'hash'},
'abc': {job['frag_index']: 'hash', None: 'hash'},
}
self.assertEqual(node['index'], self.policy.object_ring.replicas - 1)
remote_index = self.policy.get_backend_index(node['index'])
remote_hashes = {
'123': {remote_index: 'hash', None: 'hash'},
'abc': {remote_index: 'hash', None: 'hash'},
}
remote_response = pickle.dumps(remote_hashes)
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, local_hashes)), \
mocked_http_conn(200, body=remote_response) as request_log:
suffixes, new_node = self.reconstructor._get_suffixes_to_sync(
job, node)
self.assertEqual([node['replication_ip']],
[r['ip'] for r in request_log.requests])
self.assertEqual(suffixes, [])
self.assertEqual(new_node, node)
def test_get_suffix_delta(self):
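        # get_suffix_delta presumably returns the local suffixes whose hashes
        # differ from the remote's at the compared fragment indexes (or whose
        # durable/None hash differs), i.e. the suffixes that still need sync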
# different
local_suff = {'123': {None: 'abc', 0: 'def'}}
remote_suff = {'456': {None: 'ghi', 0: 'jkl'}}
local_index = 0
remote_index = 0
suffs = self.reconstructor.get_suffix_delta(local_suff,
local_index,
remote_suff,
remote_index)
self.assertEqual(suffs, ['123'])
# now the same
remote_suff = {'123': {None: 'abc', 0: 'def'}}
suffs = self.reconstructor.get_suffix_delta(local_suff,
local_index,
remote_suff,
remote_index)
self.assertEqual(suffs, [])
# now with a mis-matched None key (missing durable)
remote_suff = {'123': {None: 'ghi', 0: 'def'}}
suffs = self.reconstructor.get_suffix_delta(local_suff,
local_index,
remote_suff,
remote_index)
self.assertEqual(suffs, ['123'])
# now with bogus local index
local_suff = {'123': {None: 'abc', 99: 'def'}}
remote_suff = {'456': {None: 'ghi', 0: 'jkl'}}
suffs = self.reconstructor.get_suffix_delta(local_suff,
local_index,
remote_suff,
remote_index)
self.assertEqual(suffs, ['123'])
def test_process_job_primary_in_sync(self):
partition = 0
part_nodes = self.policy.object_ring.get_part_nodes(partition)
local_dev = random.choice(part_nodes)
frag_index = self.policy.get_backend_index(local_dev['index'])
sync_to = object_reconstructor._get_partners(
local_dev['index'], part_nodes)
# setup left, right and far hashes
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
left_frag_index = self.policy.get_backend_index(sync_to[0]['index'])
left_hashes = {
'123': {left_frag_index: 'hash', None: 'hash'},
'abc': {left_frag_index: 'hash', None: 'hash'},
}
right_frag_index = self.policy.get_backend_index(sync_to[1]['index'])
right_hashes = {
'123': {right_frag_index: 'hash', None: 'hash'},
'abc': {right_frag_index: 'hash', None: 'hash'},
}
far_index = self.policy.get_backend_index(sync_to[2]['index'])
far_hashes = {
'123': {far_index: 'hash', None: 'hash'},
'abc': {far_index: 'hash', None: 'hash'},
}
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
responses = [(200, pickle.dumps(hashes)) for hashes in (
left_hashes, right_hashes, far_hashes)]
codes, body_iter = zip(*responses)
ssync_calls = []
with mock_ssync_sender(ssync_calls), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)), \
mocked_http_conn(*codes, body_iter=body_iter) as request_log:
self.reconstructor.process_job(job)
expected_suffix_calls = [
(sync_to[0]['ip'], '/%s/0' % sync_to[0]['device']),
(sync_to[1]['ip'], '/%s/0' % sync_to[1]['device']),
(sync_to[2]['ip'], '/%s/0' % sync_to[2]['device']),
]
self.assertEqual(expected_suffix_calls,
[(r['ip'], r['path']) for r in request_log.requests])
self.assertFalse(ssync_calls)
def test_process_job_primary_not_in_sync(self):
partition = 0
part_nodes = self.policy.object_ring.get_part_nodes(partition)
local_dev = random.choice(part_nodes)
frag_index = self.policy.get_backend_index(local_dev['index'])
sync_to = object_reconstructor._get_partners(
local_dev['index'], part_nodes)
# setup left and right hashes
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
left_hashes = {}
right_hashes = {}
far_hashes = {}
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
responses = []
for hashes in (left_hashes, right_hashes, far_hashes):
responses.append((200, pickle.dumps(hashes)))
codes, body_iter = zip(*responses)
ssync_calls = []
with mock_ssync_sender(ssync_calls), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)), \
mocked_http_conn(*codes, body_iter=body_iter) as request_log:
self.reconstructor.process_job(job)
expected_suffix_calls = [
(sync_to[0]['ip'], '/%s/0' % sync_to[0]['device']),
(sync_to[1]['ip'], '/%s/0' % sync_to[1]['device']),
(sync_to[2]['ip'], '/%s/0' % sync_to[2]['device']),
]
self.assertEqual(expected_suffix_calls,
[(r['ip'], r['path']) for r in request_log.requests])
expected_ssync_calls = sorted([
(sync_to[0]['ip'], 0, set(['123', 'abc']), False),
(sync_to[1]['ip'], 0, set(['123', 'abc']), False),
(sync_to[2]['ip'], 0, set(['123', 'abc']), False),
])
self.assertEqual(expected_ssync_calls, sorted((
c['node']['ip'],
c['job']['partition'],
set(c['suffixes']),
c.get('include_non_durable'),
) for c in ssync_calls))
def test_sync_duplicates_to_remote_region(self):
partition = 0
part_nodes = self.policy.object_ring.get_part_nodes(partition)
# in the non-duplicate case we just pick a random node
local_dev = random.choice(part_nodes[-14:])
frag_index = self.policy.get_backend_index(local_dev['index'])
sync_to = object_reconstructor._get_partners(
local_dev['index'], part_nodes)
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
# setup left and right hashes
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
# left hand side is in sync
left_frag_index = self.policy.get_backend_index(sync_to[0]['index'])
left_hashes = {
'123': {left_frag_index: 'hash', None: 'hash'},
'abc': {left_frag_index: 'hash', None: 'hash'},
}
# right hand side needs sync
right_frag_index = self.policy.get_backend_index(sync_to[1]['index'])
right_hashes = {
'123': {right_frag_index: 'hash', None: 'hash'},
'abc': {right_frag_index: 'hashX', None: 'hash'},
}
far_index = self.policy.get_backend_index(sync_to[2]['index'])
far_hashes = {
'123': {far_index: 'hash', None: 'hash'},
'abc': {far_index: 'hash', None: 'hash'},
}
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
'device': self.local_dev['device'],
}
responses = [
(200, pickle.dumps(left_hashes)),
(200, pickle.dumps(right_hashes)),
(200, pickle.dumps(far_hashes)),
]
codes, body_iter = zip(*responses)
# we're going to dip our mocks into the ssync layer a bit
ssync_resp = mock.MagicMock()
ssync_resp.status = 200
ssync_resp.readline.side_effect = [
b':MISSING_CHECK: START',
b':MISSING_CHECK: END',
b':UPDATES: START',
b':UPDATES: END',
]
ssync_headers = []
def capture_headers(name, value):
ssync_headers.append((name, value))
ssync_conn = mock.MagicMock()
ssync_conn.getresponse.return_value = ssync_resp
ssync_conn.putheader = capture_headers
with mock.patch('swift.obj.ssync_sender.SsyncBufferedHTTPConnection',
return_value=ssync_conn), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)), \
mock.patch('swift.obj.diskfile.ECDiskFileManager.yield_hashes',
return_value=iter([])), \
mocked_http_conn(*codes, body_iter=body_iter):
self.reconstructor.process_job(job)
# ... to make sure it sets up our headers correctly
self.assertEqual(ssync_headers, [
('Transfer-Encoding', 'chunked'),
('X-Backend-Storage-Policy-Index', 0),
('X-Backend-Ssync-Frag-Index', right_frag_index),
# we include this for backwards compat
('X-Backend-Ssync-Node-Index', right_frag_index),
])
def test_process_job_sync_missing_durable(self):
partition = 0
part_nodes = self.policy.object_ring.get_part_nodes(partition)
local_dev = random.choice(part_nodes)
frag_index = self.policy.get_backend_index(local_dev['index'])
sync_to = object_reconstructor._get_partners(
local_dev['index'], part_nodes)
# setup left and right hashes
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
# left hand side is in sync
left_frag_index = self.policy.get_backend_index(sync_to[0]['index'])
left_hashes = {
'123': {left_frag_index: 'hash', None: 'hash'},
'abc': {left_frag_index: 'hash', None: 'hash'},
}
# right hand side has fragment, but no durable (None key is whack)
right_frag_index = self.policy.get_backend_index(sync_to[1]['index'])
right_hashes = {
'123': {right_frag_index: 'hash', None: 'hash'},
'abc': {right_frag_index: 'hash',
None: 'different-because-durable'},
}
# far side is in sync
far_index = self.policy.get_backend_index(sync_to[2]['index'])
far_hashes = {
'123': {far_index: 'hash', None: 'hash'},
'abc': {far_index: 'hash', None: 'hash'},
}
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
responses = [(200, pickle.dumps(hashes)) for hashes in (
left_hashes, right_hashes, far_hashes)]
codes, body_iter = zip(*responses)
ssync_calls = []
with mock_ssync_sender(ssync_calls), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)), \
mocked_http_conn(*codes, body_iter=body_iter) as request_log:
self.reconstructor.process_job(job)
expected_suffix_calls = set([
(sync_to[0]['ip'], '/%s/0' % sync_to[0]['device']),
(sync_to[1]['ip'], '/%s/0' % sync_to[1]['device']),
(sync_to[2]['ip'], '/%s/0' % sync_to[2]['device']),
])
self.assertEqual(expected_suffix_calls,
set((r['ip'], r['path'])
for r in request_log.requests))
expected_ssync_calls = sorted([
(sync_to[1]['ip'], 0, ['abc'], False),
])
self.assertEqual(expected_ssync_calls, sorted((
c['node']['ip'],
c['job']['partition'],
c['suffixes'],
c.get('include_non_durable')
) for c in ssync_calls))
def test_process_job_primary_some_in_sync(self):
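        # each partner needs a different subset: left differs on '123',
        # right is missing 'abc', far is missing '123' and differs on 'abc',
        # so each partner is ssync'd only the suffixes it needs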
partition = 0
part_nodes = self.policy.object_ring.get_part_nodes(partition)
local_dev = random.choice(part_nodes)
frag_index = self.policy.get_backend_index(local_dev['index'])
sync_to = object_reconstructor._get_partners(
local_dev['index'], part_nodes)
# setup left and right hashes
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
left_frag_index = self.policy.get_backend_index(sync_to[0]['index'])
left_hashes = {
'123': {left_frag_index: 'hashX', None: 'hash'},
'abc': {left_frag_index: 'hash', None: 'hash'},
}
right_frag_index = self.policy.get_backend_index(sync_to[1]['index'])
right_hashes = {
'123': {right_frag_index: 'hash', None: 'hash'},
}
far_index = self.policy.get_backend_index(sync_to[2]['index'])
far_hashes = {
'abc': {far_index: 'hashX', None: 'hash'},
}
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
responses = []
for hashes in (left_hashes, right_hashes, far_hashes):
responses.append((200, pickle.dumps(hashes)))
codes, body_iter = zip(*responses)
ssync_calls = []
with mock_ssync_sender(ssync_calls), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)), \
mocked_http_conn(*codes, body_iter=body_iter) as request_log:
self.reconstructor.process_job(job)
expected_suffix_calls = set([
(sync_to[0]['ip'], '/%s/0' % sync_to[0]['device']),
(sync_to[1]['ip'], '/%s/0' % sync_to[1]['device']),
(sync_to[2]['ip'], '/%s/0' % sync_to[2]['device']),
])
self.assertEqual(expected_suffix_calls,
set((r['ip'], r['path'])
for r in request_log.requests))
self.assertEqual(
dict(collections.Counter(
(c['node']['index'], tuple(sorted(c['suffixes'])),
c.get('include_non_durable'))
for c in ssync_calls)),
{(sync_to[0]['index'], ('123',), False): 1,
(sync_to[1]['index'], ('abc',), False): 1,
(sync_to[2]['index'], ('123', 'abc'), False): 1,
})
def test_process_job_primary_down(self):
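        # sync_to is simply the first three primaries; the ssync callback
        # fails the first attempt, but all three nodes still receive a
        # suffix-hash request and an ssync attempt for both suffixes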
partition = 0
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
part_nodes = self.policy.object_ring.get_part_nodes(partition)
sync_to = part_nodes[:3]
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'device': self.local_dev['device'],
'local_dev': self.local_dev,
}
non_local = {'called': 0}
def ssync_response_callback(*args):
# in this test, ssync fails on the first (primary sync_to) node
if non_local['called'] >= 1:
return True, {}
non_local['called'] += 1
return False, {}
expected_suffix_calls = set()
for node in part_nodes[:3]:
expected_suffix_calls.update([
(node['replication_ip'], '/%s/0' % node['device']),
])
ssync_calls = []
with mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)), \
mocked_http_conn(*[200] * len(expected_suffix_calls),
body=pickle.dumps({})) as request_log:
self.reconstructor.process_job(job)
found_suffix_calls = set((r['ip'], r['path'])
for r in request_log.requests)
self.assertEqual(expected_suffix_calls, found_suffix_calls)
expected_ssync_calls = sorted([
('10.0.0.0', 0, set(['123', 'abc']), False),
('10.0.0.1', 0, set(['123', 'abc']), False),
('10.0.0.2', 0, set(['123', 'abc']), False),
])
found_ssync_calls = sorted((
c['node']['ip'],
c['job']['partition'],
set(c['suffixes']),
c.get('include_non_durable')
) for c in ssync_calls)
self.assertEqual(expected_ssync_calls, found_ssync_calls)
def test_process_job_suffix_call_errors(self):
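        # when the suffix-hash requests to the sync_to nodes all fail
        # (404 / Timeout / Exception) no ssync is attempted at all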
partition = 0
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
part_nodes = self.policy.object_ring.get_part_nodes(partition)
sync_to = part_nodes[:2]
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'device': self.local_dev['device'],
'local_dev': self.local_dev,
}
expected_suffix_calls = set((
node['replication_ip'], '/%s/0' % node['device']
) for node in sync_to)
possible_errors = [404, Timeout(), Exception('kaboom!')]
codes = [random.choice(possible_errors)
for r in expected_suffix_calls]
ssync_calls = []
with mock_ssync_sender(ssync_calls), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)), \
mocked_http_conn(*codes) as request_log:
self.reconstructor.process_job(job)
found_suffix_calls = set((r['ip'], r['path'])
for r in request_log.requests)
self.assertEqual(expected_suffix_calls, found_suffix_calls)
self.assertFalse(ssync_calls)
def test_process_job_sync_partner_unmounted(self):
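        # the right-hand partner responds 507 (unmounted), so a handoff node
        # stands in for it; the out-of-sync left partner and the handoff get
        # ssync'd, while the in-sync far partner does not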
partition = 0
part_nodes = self.policy.object_ring.get_part_nodes(partition)
frag_index = [n['id'] for n in part_nodes].index(self.local_dev['id'])
sync_to = object_reconstructor._get_partners(frag_index, part_nodes)
self.assertEqual(3, len(sync_to))
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
# left partner out of sync
left_frag_index = self.policy.get_backend_index(sync_to[0]['index'])
left_hashes = {
'123': {left_frag_index: 'not-in-sync-hash', None: 'hash'},
'abc': {left_frag_index: 'hash', None: 'hash'},
}
# we don't need right partner hashes
# far partner in sync
far_index = self.policy.get_backend_index(sync_to[2]['index'])
far_hashes = {
'123': {far_index: 'hash', None: 'hash'},
'abc': {far_index: 'hash', None: 'hash'},
}
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'device': self.local_dev['device'],
'local_dev': self.local_dev,
}
responses = [
(200, pickle.dumps(left_hashes)), # hashes left partner
(507, ''), # unmounted right partner
(200, pickle.dumps({})), # hashes handoff
(200, pickle.dumps(far_hashes)), # hashes far partner
]
codes, body_iter = zip(*responses)
ssync_calls = []
with mock_ssync_sender(ssync_calls), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)), \
mocked_http_conn(*codes, body_iter=body_iter) as request_log:
self.reconstructor.process_job(job)
# increment frag_index since we're rebuilding to our right
frag_index = (frag_index + 1) % self.policy.ec_n_unique_fragments
handoffs = self.policy.object_ring.get_more_nodes(partition)
for i, handoff in enumerate(handoffs):
if i == frag_index:
break
else:
self.fail('Unable to find handoff?!')
expected = collections.Counter([
(200, sync_to[0]['ip']),
(507, sync_to[1]['ip']),
(200, handoff['ip']),
(200, sync_to[2]['ip']),
])
self.assertEqual(expected, collections.Counter(
[(c, r['ip']) for c, r in zip(codes, request_log.requests)]))
expected = collections.Counter([
sync_to[0]['ip'],
handoff['ip'],
])
self.assertEqual(expected, collections.Counter(
[c['node']['ip'] for c in ssync_calls]))
def test_process_job_handoff(self):
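        # a REVERT job ssyncs both suffixes, including non-durable frags, to
        # the single node in sync_to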
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
sync_to = [random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev])]
sync_to[0]['index'] = frag_index
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
os.makedirs(part_path)
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
'device': self.local_dev['device'],
}
ssync_calls = []
with mock_ssync_sender(ssync_calls), \
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
self.reconstructor.process_job(job)
self.assertEqual(
sorted(collections.Counter(
(c['node']['ip'], c['node']['port'], c['node']['device'],
tuple(sorted(c['suffixes'])),
c.get('include_non_durable'))
for c in ssync_calls).items()),
[((sync_to[0]['ip'], sync_to[0]['port'], sync_to[0]['device'],
('123', 'abc'), True), 1)])
def test_process_job_will_not_revert_to_handoff(self):
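        # ssync to the revert target fails, but the REVERT job neither falls
        # back to a handoff node nor triggers a suffix rehash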
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
sync_to = [random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev])]
sync_to[0]['index'] = frag_index
partition = 0
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
os.makedirs(part_path)
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
'device': self.local_dev['device'],
}
non_local = {'called': 0}
def ssync_response_callback(*args):
# in this test, ssync fails on the first (primary sync_to) node
if non_local['called'] >= 1:
return True, {}
non_local['called'] += 1
return False, {}
ssync_calls = []
with mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback), \
mocked_http_conn() as request_log:
self.reconstructor.process_job(job)
# failed ssync job should not generate a suffix rehash
self.assertEqual([], request_log.requests)
self.assertEqual(
sorted(collections.Counter(
(c['node']['ip'], c['node']['port'], c['node']['device'],
tuple(sorted(c['suffixes'])),
c.get('include_non_durable'))
for c in ssync_calls).items()),
[((sync_to[0]['ip'], sync_to[0]['port'], sync_to[0]['device'],
('123', 'abc'), True), 1)])
def test_process_job_revert_is_handoff_fails(self):
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
sync_to = [random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev])]
sync_to[0]['index'] = frag_index
partition = 0
handoff_nodes = list(self.policy.object_ring.get_more_nodes(partition))
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
os.makedirs(part_path)
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': handoff_nodes[-1],
'device': self.local_dev['device'],
}
def ssync_response_callback(*args):
            # in this test ssync always fails, up until we encounter
            # ourselves in the list of possible handoffs to sync to, so
            # handoffs_remaining should increment
return False, {}
ssync_calls = []
with mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback), \
mocked_http_conn() as request_log:
self.reconstructor.process_job(job)
# failed ssync job should not generate a suffix rehash
self.assertEqual([], request_log.requests)
        # this is the ssync call to the primary (which fails) and nothing
        # else!
self.assertEqual(
sorted(collections.Counter(
(c['node']['ip'], c['node']['port'], c['node']['device'],
tuple(sorted(c['suffixes'])),
c.get('include_non_durable'))
for c in ssync_calls).items()),
[((sync_to[0]['ip'], sync_to[0]['port'], sync_to[0]['device'],
('123', 'abc'), True), 1)])
self.assertEqual(self.reconstructor.handoffs_remaining, 1)
def test_process_job_revert_cleanup(self):
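        # after a successful revert ssync the reverted frag's hash dir is
        # removed and handoffs_remaining is not incremented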
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
sync_to = [random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev])]
sync_to[0]['index'] = frag_index
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
os.makedirs(part_path)
df_mgr = self.reconstructor._df_router[self.policy]
df = df_mgr.get_diskfile(self.local_dev['device'], partition, 'a',
'c', 'data-obj', policy=self.policy)
ts = self.ts()
with df.create() as writer:
test_data = b'test data'
writer.write(test_data)
metadata = {
'X-Timestamp': ts.internal,
'Content-Length': len(test_data),
'Etag': md5(test_data, usedforsecurity=False).hexdigest(),
'X-Object-Sysmeta-Ec-Frag-Index': frag_index,
}
writer.put(metadata)
writer.commit(ts)
ohash = os.path.basename(df._datadir)
suffix = os.path.basename(os.path.dirname(df._datadir))
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'primary_frag_index': None,
'suffixes': [suffix],
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': {},
'policy': self.policy,
'local_dev': self.local_dev,
'device': self.local_dev['device'],
}
def ssync_response_callback(*args):
# success should not increment handoffs_remaining
return True, {ohash: {'ts_data': ts}}
ssync_calls = []
with mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback):
self.reconstructor.process_job(job)
# hashpath has been removed
self.assertFalse(os.path.exists(df._datadir))
self.assertEqual(self.reconstructor.handoffs_remaining, 0)
def test_process_job_revert_cleanup_but_already_reclaimed(self):
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
sync_to = [random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev])]
sync_to[0]['index'] = frag_index
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
os.makedirs(part_path)
df_mgr = self.reconstructor._df_router[self.policy]
df = df_mgr.get_diskfile(self.local_dev['device'], partition, 'a',
'c', 'data-obj', policy=self.policy)
ts_delete = self.ts()
df.delete(ts_delete)
ohash = os.path.basename(df._datadir)
suffix = os.path.basename(os.path.dirname(df._datadir))
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'suffixes': [suffix],
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': {},
'policy': self.policy,
'local_dev': self.local_dev,
'device': self.local_dev['device'],
}
fake_time = [float(ts_delete) + df_mgr.reclaim_age - 100]
def mock_time():
return fake_time[0]
def ssync_response_callback(*args):
            # pretend ssync completed and time has moved just beyond the
            # reclaim age for the tombstone
fake_time[0] = float(ts_delete) + df_mgr.reclaim_age + 1
return True, {ohash: {'ts_data': ts_delete}}
ssync_calls = []
with mock.patch('swift.obj.diskfile.time.time', mock_time):
with mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback):
self.reconstructor.process_job(job)
self.assertFalse(os.path.exists(df._datadir))
self.assertEqual(self.reconstructor.handoffs_remaining, 0)
# check there's no tracebacks for opening the reclaimed tombstone
self.assertEqual(
[], self.reconstructor.logger.logger.get_lines_for_level('error'))
def _make_frag(self, df, fi, ts_data):
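        # helper: write a .data file with the given frag index and timestamp
        # into the diskfile and commit it as durable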
with df.create() as writer:
test_data = b'test data'
writer.write(test_data)
metadata = {
'X-Timestamp': ts_data.internal,
'Content-Length': len(test_data),
'Etag': md5(test_data, usedforsecurity=False).hexdigest(),
'X-Object-Sysmeta-Ec-Frag-Index': fi,
}
writer.put(metadata)
writer.commit(ts_data)
def _do_test_process_job_revert_cleanup_with_meta(self, frag_indexes,
primary_frag_index):
sync_to = [[dict(random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev]),
index=frag_index)] for frag_index in frag_indexes]
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
mkdirs(part_path)
df_mgr = self.reconstructor._df_router[self.policy]
df = df_mgr.get_diskfile(self.local_dev['device'], partition, 'a',
'c', 'data-obj', policy=self.policy)
ts_data = self.ts()
for frag_index in frag_indexes:
self._make_frag(df, frag_index, ts_data)
if primary_frag_index is not None:
self._make_frag(df, primary_frag_index, ts_data)
ts_meta = self.ts()
df.write_metadata({'X-Timestamp': ts_meta.internal,
'X-Object-Meta-Test': 'testing'})
ohash = os.path.basename(df._datadir)
suffix = os.path.basename(os.path.dirname(df._datadir))
jobs = [{
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'primary_frag_index': primary_frag_index,
'suffixes': [suffix],
'sync_to': sync_to[i],
'partition': partition,
'path': part_path,
'hashes': {},
'policy': self.policy,
'local_dev': self.local_dev,
'device': self.local_dev['device'],
} for i, frag_index in enumerate(frag_indexes)]
ondisk_files_during_sync = []
def ssync_response_callback(*args):
ondisk_files_during_sync.append(os.listdir(df._datadir))
# success should not increment handoffs_remaining
return True, {ohash: {'ts_data': ts_data, 'ts_meta': ts_meta}}
ssync_calls = []
with mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback):
for job in jobs:
self.reconstructor.process_job(job)
self.assertEqual(self.reconstructor.handoffs_remaining, 0)
self.assertEqual(len(jobs), len(ssync_calls))
self.assertEqual(len(jobs), len(ondisk_files_during_sync))
        # verify that the meta file is intact at the start of every
        # job/ssync call: if it is removed at all, it should be removed in
        # the *last* call
for fileset in ondisk_files_during_sync:
self.assertIn(ts_meta.internal + '.meta', fileset)
return df
def test_process_job_revert_does_cleanup_meta_pure_handoff(self):
        # verify that dangling meta files are cleaned up if the revert job
        # is for a pure handoff partition
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
df = self._do_test_process_job_revert_cleanup_with_meta(
frag_indexes=[frag_index], primary_frag_index=None)
# hashpath has been removed
self.assertFalse(os.path.exists(df._datadir))
extra_index = frag_index
while extra_index == frag_index:
extra_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
df = self._do_test_process_job_revert_cleanup_with_meta(
frag_indexes=[frag_index, extra_index], primary_frag_index=None)
# hashpath has been removed
self.assertFalse(os.path.exists(df._datadir))
def test_process_job_revert_does_not_cleanup_meta_also_primary(self):
        # verify that dangling meta files are not cleaned up if the revert
        # job is for a handoff partition that is also a primary for another
        # frag index
frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
primary_frag_index = frag_index
while primary_frag_index == frag_index:
primary_frag_index = random.randint(
0, self.policy.ec_n_unique_fragments - 1)
df = self._do_test_process_job_revert_cleanup_with_meta(
frag_indexes=[frag_index], primary_frag_index=primary_frag_index)
# hashpath has not been removed
self.assertTrue(os.path.exists(df._datadir))
file_info = df._manager.cleanup_ondisk_files(df._datadir)
self.maxDiff = None
self.assertTrue('meta_file' in file_info)
self.assertTrue(os.path.exists(file_info['meta_file']))
self.assertTrue('data_info' in file_info)
self.assertEqual(primary_frag_index,
file_info['data_info']['frag_index'])
self.assertTrue(os.path.exists(file_info['data_file']))
# only the primary frag and meta file remain
self.assertEqual(2, len(os.listdir(df._datadir)))
def test_process_job_revert_does_not_cleanup_meta_new_data(self):
        # verify that dangling meta files are not cleaned up if the revert
        # job is for a pure handoff partition that has a newer data frag in
        # addition to the frag that was sync'd
frag_index = 0
extra_frag_index = 1
sync_to = [dict(random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev]),
index=frag_index)]
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
mkdirs(part_path)
df_mgr = self.reconstructor._df_router[self.policy]
df = df_mgr.get_diskfile(self.local_dev['device'], partition, 'a',
'c', 'data-obj', policy=self.policy)
ts_data0 = self.ts() # original frag
ts_data1 = self.ts() # new one written during ssync
self._make_frag(df, frag_index, ts_data0)
ts_meta = self.ts()
df.write_metadata({'X-Timestamp': ts_meta.internal,
'X-Object-Meta-Test': 'testing'})
ohash = os.path.basename(df._datadir)
suffix = os.path.basename(os.path.dirname(df._datadir))
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'primary_frag_index': None,
'suffixes': [suffix],
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': {},
'policy': self.policy,
'local_dev': self.local_dev,
'device': self.local_dev['device'],
}
def ssync_response_callback(*args):
# pretend that during the ssync call the original frag is replaced
# by a newer one
self._make_frag(df, extra_frag_index, ts_data1)
return True, {ohash: {'ts_data': ts_data0, 'ts_meta': ts_meta}}
ssync_calls = []
with mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback):
self.reconstructor.process_job(job)
self.assertEqual(1, len(ssync_calls))
# hashpath has not been removed
self.assertTrue(os.path.exists(df._datadir))
file_info = df._manager.cleanup_ondisk_files(df._datadir)
self.maxDiff = None
self.assertIsNotNone(file_info['meta_file'])
self.assertTrue(os.path.exists(file_info['meta_file']))
self.assertTrue('data_info' in file_info)
self.assertTrue(os.path.exists(file_info['data_file']))
# only the newer frag and meta file remain
self.assertEqual(2, len(os.listdir(df._datadir)))
self.assertEqual(ts_data1, file_info['data_info']['timestamp'])
def test_process_job_revert_cleanup_tombstone(self):
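        # revert a hash dir holding only a tombstone (frag_index None); after
        # a successful ssync the tombstone is removed but the now-empty hash
        # dir remains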
partition = 0
sync_to = [random.choice([
n for n in self.policy.object_ring.get_part_nodes(partition)
if n['id'] != self.local_dev['id']])]
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
os.makedirs(part_path)
df_mgr = self.reconstructor._df_router[self.policy]
df = df_mgr.get_diskfile(self.local_dev['device'], partition, 'a',
'c', 'data-obj', policy=self.policy)
ts = self.ts()
df.delete(ts)
ohash = os.path.basename(df._datadir)
suffix = os.path.basename(os.path.dirname(df._datadir))
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': None,
'primary_frag_index': None,
'suffixes': [suffix],
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': {},
'policy': self.policy,
'local_dev': self.local_dev,
'device': self.local_dev['device'],
}
def ssync_response_callback(*args):
return True, {ohash: {'ts_data': ts}}
ssync_calls = []
with mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback):
self.reconstructor.process_job(job)
# hashpath is still there, but it's empty
self.assertEqual([], os.listdir(df._datadir))
def test_get_local_devices(self):
local_devs = self.reconstructor.get_local_devices()
self.assertEqual({'sda'}, local_devs)
@patch_policies(legacy_only=True)
def test_get_local_devices_with_no_ec_policy_env(self):
        # even when no EC policy is configured on the server, it runs just
        # as if no EC device had been found
self._configure_reconstructor()
self.assertEqual([], self.reconstructor.policies)
local_devs = self.reconstructor.get_local_devices()
self.assertEqual(set(), local_devs)
@patch_policies(legacy_only=True)
def test_reconstruct_with_no_ec_policy_env(self):
self._configure_reconstructor()
self.assertEqual([], self.reconstructor.policies)
collect_parts_results = []
_orig_collect_parts = self.reconstructor.collect_parts
def capture_collect_parts(**kwargs):
part_infos = _orig_collect_parts(**kwargs)
collect_parts_results.append(part_infos)
return part_infos
with mock.patch.object(self.reconstructor, 'collect_parts',
capture_collect_parts):
self.reconstructor.reconstruct()
# There is one call, and it returns an empty list
self.assertEqual([[]], collect_parts_results)
log_lines = self.logger.all_log_lines()
self.assertEqual(log_lines, {'info': [mock.ANY]})
line = log_lines['info'][0]
self.assertTrue(line.startswith('Nothing reconstructed '), line)
class TestReconstructFragmentArchive(BaseTestObjectReconstructor):
obj_name = b'o' # subclass overrides this
def setUp(self):
super(TestReconstructFragmentArchive, self).setUp()
self.obj_path = b'/a/c/' + self.obj_name
self.obj_timestamp = self.ts()
def _create_fragment(self, frag_index, body=b'test data'):
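        # helper: write a fragment diskfile for self.obj_name on sda1 with
        # the given frag index and body, then open and return it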
utils.mkdirs(os.path.join(self.devices, 'sda1'))
df_mgr = self.reconstructor._df_router[self.policy]
if six.PY2:
obj_name = self.obj_name
else:
obj_name = self.obj_name.decode('utf8')
self.df = df_mgr.get_diskfile('sda1', 9, 'a', 'c', obj_name,
policy=self.policy)
write_diskfile(self.df, self.obj_timestamp, data=body,
frag_index=frag_index)
self.df.open()
self.logger.clear()
return self.df
def test_reconstruct_fa_no_errors(self):
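        # rebuild the frag belonging to part_nodes[1] from the other
        # primaries' responses; verify the rebuilt body matches the missing
        # frag and check the headers sent on each internal GET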
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
responses = list()
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
responses.append((200, body, headers))
# make a hook point at
# swift.obj.reconstructor.ObjectReconstructor._get_response
called_headers = []
orig_func = object_reconstructor.ObjectReconstructor._get_response
def _get_response_hook(self, node, policy, part, path, headers):
called_headers.append(headers)
return orig_func(self, node, policy, part, path, headers)
codes, body_iter, headers = zip(*responses)
get_response_path = \
'swift.obj.reconstructor.ObjectReconstructor._get_response'
with mock.patch(get_response_path, _get_response_hook):
with mocked_http_conn(
*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2, body=b''))
self.assertEqual(0, df.content_length)
fixed_body = b''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
self.assertEqual(len(part_nodes) - 1, len(called_headers),
'Expected %d calls, got %r' % (len(part_nodes) - 1,
called_headers))
for called_header in called_headers:
called_header = HeaderKeyDict(called_header)
self.assertIn('Content-Length', called_header)
self.assertEqual(called_header['Content-Length'], '0')
self.assertIn('User-Agent', called_header)
user_agent = called_header['User-Agent']
self.assertTrue(user_agent.startswith('obj-reconstructor'))
self.assertIn('X-Backend-Storage-Policy-Index', called_header)
self.assertEqual(called_header['X-Backend-Storage-Policy-Index'],
self.policy)
self.assertIn('X-Backend-Fragment-Preferences', called_header)
self.assertEqual(
[{'timestamp': self.obj_timestamp.normal, 'exclude': []}],
json.loads(called_header['X-Backend-Fragment-Preferences']))
self.assertIn('X-Backend-Replication', called_header)
# no error and warning
self.assertFalse(self.logger.get_lines_for_level('error'))
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_errors_works(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[4]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(4)
base_responses = list()
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
base_responses.append((200, body, headers))
# since we're already missing a fragment a +2 scheme can only support
# one additional failure at a time
for error in (Timeout(), 404, Exception('kaboom!')):
            # use a fresh copy each time so errors don't accumulate
            responses = list(base_responses)
error_index = random.randint(0, len(responses) - 1)
responses[error_index] = (error, '', '')
codes, body_iter, headers_iter = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter,
headers=headers_iter):
df = self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
fixed_body = b''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(
md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
def test_reconstruct_fa_mixed_meta_timestamps_works(self):
# verify scenario where all fragments have same data timestamp but some
# have different meta timestamp
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[4]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(4)
ts_data = next(self.ts_iter) # all frags .data timestamp
ts_meta = next(self.ts_iter) # some frags .meta timestamp
ts_cycle = itertools.cycle((ts_data, ts_meta))
responses = list()
for body in ec_archive_bodies:
ts = next(ts_cycle) # vary timestamp between data and meta
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag,
'X-Timestamp': ts.normal,
'X-Backend-Timestamp': ts.internal,
'X-Backend-Data-Timestamp': ts_data.internal,
'X-Backend-Durable-Timestamp': ts_data.internal})
responses.append((200, body, headers))
codes, body_iter, headers_iter = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter,
headers=headers_iter):
df = self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
fixed_body = b''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(
md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
def test_reconstruct_fa_error_with_invalid_header(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[4]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(4)
base_responses = list()
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
base_responses.append((200, body, headers))
responses = base_responses
# force the test to exercise the handling of this bad response by
# sticking it in near the front
error_index = random.randint(0, self.policy.ec_ndata - 1)
status, body, headers = responses[error_index]
# one esoteric failure is a literal string 'None' in place of the
# X-Object-Sysmeta-EC-Frag-Index
stub_node_job = {'some_keys': 'foo', 'but_not': 'frag_index'}
headers['X-Object-Sysmeta-Ec-Frag-Index'] = str(
stub_node_job.get('frag_index'))
# oops!
self.assertEqual('None',
headers.get('X-Object-Sysmeta-Ec-Frag-Index'))
responses[error_index] = status, body, headers
codes, body_iter, headers_iter = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter,
headers=headers_iter):
df = self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
fixed_body = b''.join(df.reader())
# ... this bad response should be ignored like any other failure
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(
md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
def test_reconstruct_parity_fa_with_data_node_failure(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[-4]
node['backend_index'] = self.policy.get_backend_index(node['index'])
# make up some data (trim some amount to make it unaligned with
# segment size)
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-454]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
# the scheme is 10+4, so this gets a parity node
broken_body = ec_archive_bodies.pop(-4)
responses = list()
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
responses.append((200, body, headers))
for error in (Timeout(), 404, Exception('kaboom!')):
# grab a data node index
error_index = random.randint(0, self.policy.ec_ndata - 1)
responses[error_index] = (error, '', '')
codes, body_iter, headers_iter = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter,
headers=headers_iter):
df = self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
fixed_body = b''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(
md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
def test_reconstruct_fa_exceptions_fails(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
node['backend_index'] = self.policy.get_backend_index(node['index'])
policy = self.policy
possible_errors = [Timeout(), Exception('kaboom!')]
codes = [random.choice(possible_errors) for i in
range(policy.object_ring.replicas - 1)]
with mocked_http_conn(*codes):
self.assertRaises(DiskFileError, self.reconstructor.reconstruct_fa,
job, node, self._create_fragment(2))
error_lines = self.logger.get_lines_for_level('error')
# # of replicas failed and one more error log to report not enough
# responses to reconstruct.
self.assertEqual(policy.object_ring.replicas, len(error_lines))
for line in error_lines[:-1]:
self.assertIn("Trying to GET", line)
self.assertIn(
'Unable to get enough responses (%s x unknown error responses)'
% (policy.object_ring.replicas - 1),
error_lines[-1],
"Unexpected error line found: %s" % error_lines[-1])
# no warning
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_all_404s_fails(self):
self._create_fragment(2)
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
node['backend_index'] = self.policy.get_backend_index(node['index'])
policy = self.policy
codes = [404 for i in range(policy.object_ring.replicas - 1)]
with mocked_http_conn(*codes):
self.assertRaises(DiskFileError, self.reconstructor.reconstruct_fa,
job, node, self.df)
error_lines = self.logger.get_lines_for_level('error')
# only 1 log to report not enough responses
self.assertEqual(1, len(error_lines))
self.assertIn(
'Unable to get enough responses (%s x 404 error responses)'
% (policy.object_ring.replicas - 1),
error_lines[0],
"Unexpected error line found: %s" % error_lines[0])
# no warning
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_all_404s_fails_custom_request_node_count(self):
# verify that when quarantine_threshold is not set the number of
# requests is capped at replicas - 1 regardless of request_node_count
self._create_fragment(2)
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
node['backend_index'] = self.policy.get_backend_index(node['index'])
ring = self.policy.object_ring
# sanity check: number of handoffs available == replicas
self.assertEqual(ring.max_more_nodes, ring.replicas)
for request_node_count in (0,
self.policy.ec_ndata - 1,
ring.replicas + 1,
2 * ring.replicas - 1,
2 * ring.replicas,
3 * ring.replicas,
99 * ring.replicas):
with annotate_failure(request_node_count):
self.logger.clear()
self.reconstructor.request_node_count = \
lambda replicas: request_node_count
# request count capped at num primaries - 1
exp_requests = ring.replicas - 1
codes = [404 for i in range(exp_requests)]
with mocked_http_conn(*codes):
self.assertRaises(DiskFileError,
self.reconstructor.reconstruct_fa,
job, node, self.df)
error_lines = self.logger.get_lines_for_level('error')
# only 1 log to report not enough responses
self.assertEqual(1, len(error_lines))
self.assertIn(
'Unable to get enough responses (%s x 404 error responses)'
% exp_requests,
error_lines[0],
"Unexpected error line found: %s" % error_lines[0])
# no warning
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_mixture_of_errors_fails(self):
self._create_fragment(2)
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
node['backend_index'] = self.policy.get_backend_index(node['index'])
policy = self.policy
# ensure at least one of each error type
possible_errors = [Timeout(), 404, 507]
codes = possible_errors + [random.choice(possible_errors) for i in
range(policy.object_ring.replicas - 4)]
with mocked_http_conn(*codes):
self.assertRaises(DiskFileError, self.reconstructor.reconstruct_fa,
job, node, self.df)
exp_timeouts = len([c for c in codes if isinstance(c, Timeout)])
exp_404s = len([c for c in codes if c == 404])
exp_507s = len([c for c in codes if c == 507])
error_lines = self.logger.get_lines_for_level('error')
# 1 error log to report not enough responses and possibly some to
# report Timeouts
self.assertEqual(len(error_lines), exp_timeouts + 1, error_lines)
for line in error_lines[:-1]:
self.assertIn("Trying to GET", line)
self.assertIn(
'Unable to get enough responses '
'(%s x unknown, %s x 404, %s x 507 error responses)'
% (exp_timeouts, exp_404s, exp_507s), error_lines[-1],
"Unexpected error line found: %s" % error_lines[-1])
# no warning
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual(exp_507s, len(warning_lines), warning_lines)
for line in warning_lines:
self.assertIn('Invalid response 507', line)
def test_reconstruct_fa_with_mixed_old_etag(self):
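        # a single response with an older timestamp and a different etag is
        # ignored; reconstruction succeeds with no errors or warnings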
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
# bad response
broken_body = ec_archive_bodies.pop(1)
ts = make_timestamp_iter()
bad_headers = get_header_frag_index(self, broken_body)
bad_headers.update({
'X-Object-Sysmeta-Ec-Etag': 'some garbage',
'X-Backend-Timestamp': next(ts).internal,
})
# good responses
responses = list()
t1 = next(ts).internal
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': t1})
responses.append((200, body, headers))
# include the one older frag with different etag in first responses
error_index = random.randint(0, self.policy.ec_ndata - 1)
error_headers = get_header_frag_index(self,
(responses[error_index])[1])
error_headers.update(bad_headers)
bad_response = (200, '', bad_headers)
responses[error_index] = bad_response
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
fixed_body = b''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(
md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
# no error and warning
self.assertFalse(self.logger.get_lines_for_level('error'))
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_with_mixed_new_etag(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
ts = make_timestamp_iter()
# good responses
responses = list()
t0 = next(ts).internal
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': t0})
responses.append((200, body, headers))
# sanity check before negative test
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
fixed_body = b''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(
md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
# one newer etag won't spoil the bunch
new_index = random.randint(0, self.policy.ec_ndata - 1)
new_headers = get_header_frag_index(self, (responses[new_index])[1])
new_headers.update({'X-Object-Sysmeta-Ec-Etag': 'some garbage',
'X-Backend-Timestamp': next(ts).internal})
new_response = (200, '', new_headers)
responses[new_index] = new_response
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
fixed_body = b''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(
md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
# no error and warning
self.assertFalse(self.logger.get_lines_for_level('error'))
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_with_mixed_etag_with_same_timestamp(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
# good responses
responses = list()
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
responses.append((200, body, headers))
# sanity check before negative test
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
fixed_body = b''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(
md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
        # a response at the same timestamp but with a different etag won't
        # spoil the bunch.
        # N.B. (FIXME): if we choose the first response as the garbage one,
        # the reconstruction fails because all the other *correct* frags
        # will be treated as garbage. To avoid that spurious failure,
        # restrict randint to [1, self.policy.ec_ndata - 1] so that the
        # first response always has the correct etag to reconstruct from
new_index = random.randint(1, self.policy.ec_ndata - 1)
new_headers = get_header_frag_index(self, (responses[new_index])[1])
new_headers.update({'X-Object-Sysmeta-Ec-Etag': 'some garbage'})
new_response = (200, '', new_headers)
responses[new_index] = new_response
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
fixed_body = b''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(
md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
# expect an error log but no warnings
error_log_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_log_lines))
self.assertIn(
'Mixed Etag (some garbage, %s) for 10.0.0.1:1001/sdb/0%s '
'policy#%s frag#1' %
(etag, self.obj_path.decode('utf8'), int(self.policy)),
error_log_lines[0])
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_with_mixed_timestamps_etags_fail(self):
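        # responses are spread across three distinct (etag, timestamp) sets,
        # none of which has ec_ndata frags, so reconstruction fails and one
        # error line is logged per etag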
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
ec_archive_dict = dict()
ts = make_timestamp_iter()
# create 3 different ec bodies
for i in range(3):
body = test_data[i:]
archive_bodies = encode_frag_archive_bodies(self.policy, body)
# pop the index to the destination node
archive_bodies.pop(1)
key = (md5(body, usedforsecurity=False).hexdigest(),
next(ts).internal, bool(i % 2))
ec_archive_dict[key] = archive_bodies
responses = list()
# fill out response list by 3 different etag bodies
for etag, ts, durable in itertools.cycle(ec_archive_dict):
body = ec_archive_dict[(etag, ts, durable)].pop(0)
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': ts})
if durable:
headers['X-Backend-Durable-Timestamp'] = ts
responses.append((200, body, headers))
if len(responses) >= (self.policy.object_ring.replicas - 1):
break
        # sanity: there are 3 different etags and no etag has > ec_k bodies
etag_count = collections.Counter(
[in_resp_headers['X-Object-Sysmeta-Ec-Etag']
for _, _, in_resp_headers in responses])
self.assertEqual(3, len(etag_count))
for etag, count in etag_count.items():
self.assertLess(count, self.policy.ec_ndata)
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
self.assertRaises(DiskFileError, self.reconstructor.reconstruct_fa,
job, node, self._create_fragment(2))
error_lines = self.logger.get_lines_for_level('error')
# 1 error log per etag to report not enough responses
self.assertEqual(3, len(error_lines))
for error_line in error_lines:
for expected_etag, ts, durable in ec_archive_dict:
if expected_etag in error_line:
break
else:
self.fail(
"no expected etag %s found: %s" %
(list(ec_archive_dict), error_line))
# remove the found etag which should not be found in the
# following error lines
del ec_archive_dict[(expected_etag, ts, durable)]
expected = 'Unable to get enough responses (%s/10 from %s ok ' \
'responses) to reconstruct %s 10.0.0.1:1001/sdb/0%s ' \
'policy#0 frag#1 with ETag %s and timestamp %s' %\
(etag_count[expected_etag], etag_count[expected_etag],
'durable' if durable else 'non-durable',
self.obj_path.decode('utf8'), expected_etag, ts)
self.assertIn(
expected, error_line,
"Unexpected error line found: Expected: %s Got: %s"
% (expected, error_line))
# no warning
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_with_mixed_etags_same_timestamp_fail(self):
self._create_fragment(2)
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
ec_archive_dict = dict()
ts = next(make_timestamp_iter())
# create 3 different ec bodies
for i in range(3):
body = test_data[i:]
archive_bodies = encode_frag_archive_bodies(self.policy, body)
# pop the index to the destination node
archive_bodies.pop(1)
key = (md5(body, usedforsecurity=False).hexdigest(),
ts.internal, bool(i % 2))
ec_archive_dict[key] = archive_bodies
responses = list()
# fill out response list by 3 different etag bodies, same timestamp
for etag, ts, durable in itertools.cycle(ec_archive_dict):
body = ec_archive_dict[(etag, ts, durable)].pop(0)
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': ts})
if durable:
headers['X-Backend-Durable-Timestamp'] = ts
responses.append((200, body, headers))
if len(responses) >= (self.policy.object_ring.replicas - 1):
break
        # sanity: there are 3 different etags and no etag has > ec_k bodies
etag_count = collections.Counter(
[in_resp_headers['X-Object-Sysmeta-Ec-Etag']
for _, _, in_resp_headers in responses])
self.assertEqual(3, len(etag_count))
for etag, count in etag_count.items():
self.assertLess(count, self.policy.ec_ndata)
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
self.assertRaises(DiskFileError, self.reconstructor.reconstruct_fa,
job, node, self.df)
error_lines = self.logger.get_lines_for_level('error')
self.assertGreater(len(error_lines), 1)
for expected_etag, ts, durable in ec_archive_dict:
if expected_etag in error_lines[-1]:
break
else:
self.fail(
"no expected etag %s found: %s" %
(list(ec_archive_dict), error_lines[0]))
other_etags_count = sum(count for etag, count in etag_count.items()
if etag != expected_etag)
self.assertEqual(other_etags_count + 1, len(error_lines))
for line in error_lines[:-1]:
self.assertIn('Mixed Etag', line)
expected = 'Unable to get enough responses (%s/10 from %s ok ' \
'responses) to reconstruct %s 10.0.0.1:1001/sdb/0%s ' \
'policy#0 frag#1 with ETag %s and timestamp %s' % \
(etag_count[expected_etag], len(responses),
'durable' if durable else 'non-durable',
self.obj_path.decode('utf8'), expected_etag, ts)
self.assertIn(
expected, error_lines[-1],
"Unexpected error line found: Expected: %s Got: %s"
% (expected, error_lines[0]))
# no warning
self.assertFalse(self.logger.get_lines_for_level('warning'))
def test_reconstruct_fa_finds_missing_frag_does_not_fail(self):
# verify that reconstruction of a missing frag can cope with finding
# that missing frag in the responses it gets from other nodes while
# attempting to rebuild the missing frag
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
broken_index = random.randint(0, self.policy.ec_ndata - 1)
node = part_nodes[broken_index]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
# instead of popping the broken body, we'll just leave it in the list
# of responses and take away something else.
broken_body = ec_archive_bodies[broken_index]
ec_archive_bodies = ec_archive_bodies[:-1]
def make_header(body):
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
return headers
responses = [(200, body, make_header(body))
for body in ec_archive_bodies]
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
fixed_body = b''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(
md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
# no error, no warning
self.assertFalse(self.logger.get_lines_for_level('error'))
self.assertFalse(self.logger.get_lines_for_level('warning'))
        # finding its own frag will be reported in the debug message
debug_log_lines = self.logger.get_lines_for_level('debug')
# redundant frag found once in first ec_ndata responses
self.assertIn(
'Found existing frag #%s at' % broken_index,
debug_log_lines[0], debug_log_lines)
        # N.B. in the future we could avoid these checks, because simply
        # sending the found copy rather than reconstructing would save
        # resources. Another reason we avoid feeding the destination index
        # fragment into the reconstruct function is that doing so causes a
        # bunch of warning logs from liberasurecode[1].
        # 1: https://github.com/openstack/liberasurecode/blob/
        # master/src/erasurecode.c#L870
log_prefix = 'Reconstruct frag #%s with frag indexes' % broken_index
self.assertIn(log_prefix, debug_log_lines[1])
self.assertFalse(debug_log_lines[2:])
got_frag_index_list = json.loads(
debug_log_lines[1][len(log_prefix):])
self.assertNotIn(broken_index, got_frag_index_list)
def test_quarantine_threshold_conf(self):
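        # quarantine_threshold defaults to 0 (disabled) and accepts only
        # non-negative integer values; anything else raises ValueError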
reconstructor = object_reconstructor.ObjectReconstructor({})
self.assertEqual(0, reconstructor.quarantine_threshold)
reconstructor = object_reconstructor.ObjectReconstructor(
{'quarantine_threshold': '0'})
self.assertEqual(0, reconstructor.quarantine_threshold)
reconstructor = object_reconstructor.ObjectReconstructor(
{'quarantine_threshold': '1'})
self.assertEqual(1, reconstructor.quarantine_threshold)
reconstructor = object_reconstructor.ObjectReconstructor(
{'quarantine_threshold': 2.0})
self.assertEqual(2, reconstructor.quarantine_threshold)
for bad in ('1.1', 1.1, '-1', -1, 'auto', 'bad'):
with annotate_failure(bad):
with self.assertRaises(ValueError):
object_reconstructor.ObjectReconstructor(
{'quarantine_threshold': bad})
def test_quarantine_age_conf(self):
# defaults to DEFAULT_RECLAIM_AGE
reconstructor = object_reconstructor.ObjectReconstructor({})
self.assertEqual(604800, reconstructor.quarantine_age)
reconstructor = object_reconstructor.ObjectReconstructor(
{'quarantine_age': '0'})
self.assertEqual(0, reconstructor.quarantine_age)
reconstructor = object_reconstructor.ObjectReconstructor(
{'quarantine_age': '1'})
self.assertEqual(1, reconstructor.quarantine_age)
# trumps reclaim_age
reconstructor = object_reconstructor.ObjectReconstructor(
{'quarantine_age': '1', 'reclaim_age': 0})
self.assertEqual(1, reconstructor.quarantine_age)
reconstructor = object_reconstructor.ObjectReconstructor(
{'quarantine_age': '1', 'reclaim_age': 2})
self.assertEqual(1, reconstructor.quarantine_age)
reconstructor = object_reconstructor.ObjectReconstructor(
{'quarantine_age': 2.2})
self.assertEqual(2, reconstructor.quarantine_age)
for bad in ('1.1', 'auto', 'bad'):
with annotate_failure(bad):
with self.assertRaises(ValueError):
object_reconstructor.ObjectReconstructor(
{'quarantine_age': bad})
def test_request_node_count_conf(self):
        # default is 2 * replicas
reconstructor = object_reconstructor.ObjectReconstructor({})
self.assertEqual(6, reconstructor.request_node_count(3))
self.assertEqual(22, reconstructor.request_node_count(11))
def do_test(value, replicas, expected):
reconstructor = object_reconstructor.ObjectReconstructor(
{'request_node_count': value})
self.assertEqual(expected,
reconstructor.request_node_count(replicas))
do_test('0', 10, 0)
do_test('1 * replicas', 3, 3)
do_test('1 * replicas', 11, 11)
do_test('2 * replicas', 3, 6)
do_test('2 * replicas', 11, 22)
do_test('11', 11, 11)
do_test('10', 11, 10)
do_test('12', 11, 12)
for bad in ('1.1', 1.1, 'auto', 'bad',
'2.5 * replicas', 'two * replicas'):
with annotate_failure(bad):
with self.assertRaises(ValueError):
object_reconstructor.ObjectReconstructor(
{'request_node_count': bad})
def _do_test_reconstruct_insufficient_frags(
self, extra_conf, num_frags, other_responses,
local_frag_index=2, frag_index_to_rebuild=1,
resp_timestamps=None, resp_etags=None):
        # num_frags is the number of ok frag responses; other_responses are
        # the bad responses. By default frag_index_to_rebuild is less than
        # local_frag_index and all frag responses have indexes >=
        # local_frag_index
self.assertGreater(num_frags, 0)
self.logger.clear()
self._configure_reconstructor(**extra_conf)
self._create_fragment(local_frag_index)
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[frag_index_to_rebuild]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
frags = ec_archive_bodies[
local_frag_index:local_frag_index + num_frags]
if resp_etags:
self.assertEqual(len(frags), len(resp_etags))
etags = []
for other_etag in resp_etags:
# use default etag where other_etag is None
etags.append(other_etag if other_etag else etag)
else:
etags = [etag] * len(frags)
def make_header(body):
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etags.pop(0)})
return headers
responses = [(200, frag, make_header(frag)) for frag in frags]
codes, body_iter, headers = zip(*(responses + other_responses))
resp_timestamps = (resp_timestamps if resp_timestamps
else [self.obj_timestamp] * len(codes))
resp_timestamps = [ts.internal for ts in resp_timestamps]
with mocked_http_conn(*codes, body_iter=body_iter,
headers=headers,
timestamps=resp_timestamps):
with self.assertRaises(DiskFileError) as cm:
self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
return cm.exception
def _verify_error_lines(self, num_frags, other_responses,
exp_useful_responses):
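        # helper: assert that exactly two error lines were logged - one
        # reporting the useful frag responses and one summarising the
        # error responses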
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(2, len(error_lines), error_lines)
self.assertIn(
'Unable to get enough responses (%d/%d from %d ok responses)'
% (exp_useful_responses, self.policy.ec_ndata, num_frags),
error_lines[0])
bad_codes = collections.Counter(
status for status, _, _ in other_responses)
errors = ', '.join('%s x %s' % (num, code)
for code, num in sorted(bad_codes.items()))
self.assertIn('Unable to get enough responses (%s error responses)'
% errors, error_lines[1])
def _assert_diskfile_quarantined(self):
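        # the local diskfile should have been moved to the quarantine dir
        # and a 'Quarantined object' warning logged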
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_lines), warning_lines)
self.assertIn('Quarantined object', warning_lines[0])
# Check the diskfile has moved to quarantine dir
data_filename = os.path.basename(self.df._data_file)
df_hash = os.path.basename(self.df._datadir)
quarantine_dir = os.path.join(
self.df._device_path, 'quarantined',
diskfile.get_data_dir(self.policy), df_hash)
self.assertTrue(os.path.isdir(quarantine_dir))
quarantine_file = os.path.join(quarantine_dir, data_filename)
self.assertTrue(os.path.isfile(quarantine_file))
with open(quarantine_file, 'r') as fd:
self.assertEqual('test data', fd.read())
self.assertFalse(os.path.exists(self.df._data_file))
def _assert_diskfile_not_quarantined(self):
# Check the diskfile has not moved to quarantine dir
quarantine_dir = os.path.join(
self.df._device_path, 'quarantined')
self.assertFalse(os.path.isdir(quarantine_dir))
self.assertTrue(os.path.exists(self.df._data_file))
with open(self.df._data_file, 'r') as fd:
self.assertEqual('test data', fd.read())
def test_reconstruct_fa_quarantine_threshold_one_rnc_two_replicas(self):
# use default request_node_count == 2 * replicas
num_other_resps = 2 * self.policy.object_ring.replicas - 2
other_responses = [(404, None, None)] * num_other_resps
conf = {'quarantine_threshold': 1, 'reclaim_age': 0}
exc = self._do_test_reconstruct_insufficient_frags(
conf, 1, other_responses)
self.assertIsInstance(exc, DiskFileQuarantined)
self._assert_diskfile_quarantined()
self._verify_error_lines(1, other_responses, 1)
def test_reconstruct_fa_quarantine_threshold_one_rnc_three_replicas(self):
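        # as the default-rnc test above, but request_node_count is
        # '3 * replicas' so more handoffs are searched before the lonely
        # frag is quarantined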
num_other_resps = 3 * self.policy.object_ring.replicas - 2
other_responses = [(404, None, None)] * num_other_resps
conf = {'quarantine_threshold': 1, 'reclaim_age': 0,
'request_node_count': '3 * replicas'}
# set ring get_more_nodes to yield enough handoffs
self.policy.object_ring.max_more_nodes = (
2 * self.policy.object_ring.replicas)
exc = self._do_test_reconstruct_insufficient_frags(
conf, 1, other_responses)
self.assertIsInstance(exc, DiskFileQuarantined)
self._assert_diskfile_quarantined()
self._verify_error_lines(1, other_responses, 1)
def test_reconstruct_fa_quarantine_threshold_one_rnc_four_replicas(self):
# verify handoff search exhausting handoff node iter
num_other_resps = 3 * self.policy.object_ring.replicas - 2
other_responses = [(404, None, None)] * num_other_resps
conf = {'quarantine_threshold': 1, 'reclaim_age': 0,
'request_node_count': '4 * replicas'}
# limit ring get_more_nodes to yield less than
# (request_node_count - 1 * replicas) nodes
self.policy.object_ring.max_more_nodes = (
2 * self.policy.object_ring.replicas)
exc = self._do_test_reconstruct_insufficient_frags(
conf, 1, other_responses)
self.assertIsInstance(exc, DiskFileQuarantined)
self._assert_diskfile_quarantined()
self._verify_error_lines(1, other_responses, 1)
def test_reconstruct_fa_quarantine_threshold_one_rnc_absolute_number(self):
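        # request_node_count may also be configured as an absolute node
        # count rather than a multiple of replicas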
def do_test(rnc_num):
if rnc_num < self.policy.object_ring.replicas:
num_other_resps = self.policy.object_ring.replicas - 2
else:
num_other_resps = rnc_num - 2
other_responses = [(404, None, None)] * num_other_resps
conf = {'quarantine_threshold': 1, 'reclaim_age': 0,
'request_node_count': str(rnc_num)}
# set ring get_more_nodes to yield enough handoffs
self.policy.object_ring.max_more_nodes = (
2 * self.policy.object_ring.replicas)
exc = self._do_test_reconstruct_insufficient_frags(
conf, 1, other_responses)
self.assertIsInstance(exc, DiskFileQuarantined)
self._assert_diskfile_quarantined()
self._verify_error_lines(1, other_responses, 1)
for rnc_num in range(0, 3 * self.policy.object_ring.replicas):
do_test(rnc_num)
def test_reconstruct_fa_quarantine_threshold_two(self):
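        # finding exactly quarantine_threshold (2) frags, with 404s from
        # all other nodes, still results in quarantine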
num_other_resps = 2 * self.policy.object_ring.replicas - 3
other_responses = [(404, None, None)] * num_other_resps
conf = {'quarantine_threshold': 2, 'reclaim_age': 0}
exc = self._do_test_reconstruct_insufficient_frags(
conf, 2, other_responses)
self.assertIsInstance(exc, DiskFileQuarantined)
self._assert_diskfile_quarantined()
self._verify_error_lines(2, other_responses, 2)
def test_reconstruct_fa_quarantine_threshold_two_with_quarantine_age(self):
num_other_resps = 2 * self.policy.object_ring.replicas - 3
other_responses = [(404, None, None)] * num_other_resps
conf = {'quarantine_threshold': 2,
'quarantine_age': 0, # quarantine age trumps reclaim age
'reclaim_age': 1000}
exc = self._do_test_reconstruct_insufficient_frags(
conf, 2, other_responses)
self.assertIsInstance(exc, DiskFileQuarantined)
self._assert_diskfile_quarantined()
self._verify_error_lines(2, other_responses, 2)
def test_reconstruct_fa_no_quarantine_more_than_threshold_frags(self):
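        # quarantine must not happen when more frags than
        # quarantine_threshold are found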
# default config
num_other_resps = self.policy.object_ring.replicas - 2
other_responses = [(404, None, None)] * num_other_resps
exc = self._do_test_reconstruct_insufficient_frags(
{'reclaim_age': 0}, 1, other_responses)
self.assertIsInstance(exc, DiskFileError)
self._assert_diskfile_not_quarantined()
# configured quarantine_threshold
for quarantine_threshold in range(self.policy.ec_ndata):
for num_frags in range(quarantine_threshold + 1,
self.policy.ec_ndata):
num_other_resps = (self.policy.object_ring.replicas -
num_frags - 1)
other_responses = [(404, None, None)] * num_other_resps
exc = self._do_test_reconstruct_insufficient_frags(
{'quarantine_threshold': quarantine_threshold,
'reclaim_age': 0},
num_frags, other_responses)
self.assertIsInstance(exc, DiskFileError)
self._assert_diskfile_not_quarantined()
self._verify_error_lines(num_frags, other_responses, num_frags)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual([], warning_lines)
# responses include the frag_index_to_rebuild - verify that response is
# counted against the threshold
num_other_resps = self.policy.object_ring.replicas - 3
other_responses = [(404, None, None)] * num_other_resps
exc = self._do_test_reconstruct_insufficient_frags(
{'quarantine_threshold': 1, 'reclaim_age': 0}, 2, other_responses,
local_frag_index=2, frag_index_to_rebuild=3)
self.assertIsInstance(exc, DiskFileError)
self._assert_diskfile_not_quarantined()
self._verify_error_lines(2, other_responses, 1)
def test_reconstruct_fa_no_quarantine_non_404_response(self):
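        # a non-404 error response means more frags might still exist, so
        # the lonely frag must not be quarantined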
num_frags = 1
ring = self.policy.object_ring
for bad_status in (400, 503, 507):
# a non-404 in primary responses will prevent quarantine
num_other_resps = ring.replicas - num_frags - 1
other_responses = [(404, None, None)] * (num_other_resps - 1)
other_responses.append((bad_status, None, None))
exc = self._do_test_reconstruct_insufficient_frags(
{'quarantine_threshold': 1, 'reclaim_age': 0},
num_frags, other_responses)
self.assertIsInstance(exc, DiskFileError)
self._assert_diskfile_not_quarantined()
self._verify_error_lines(num_frags, other_responses, num_frags)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_lines), warning_lines)
self.assertIn('Invalid response %s' % bad_status, warning_lines[0])
# a non-404 in handoff responses will prevent quarantine; non-404
# is the *final* handoff response...
ring.max_more_nodes = (13 * ring.replicas)
for request_node_count in (2, 3, 13):
num_other_resps = (request_node_count * ring.replicas
- num_frags - 1)
other_responses = [(404, None, None)] * (num_other_resps - 1)
other_responses.append((bad_status, None, None))
with annotate_failure(
'request_node_count=%d' % request_node_count):
exc = self._do_test_reconstruct_insufficient_frags(
{'quarantine_threshold': 1,
'reclaim_age': 0,
'request_node_count': '%s * replicas'
% request_node_count},
num_frags, other_responses)
self.assertIsInstance(exc, DiskFileError)
self._assert_diskfile_not_quarantined()
self._verify_error_lines(num_frags, other_responses, num_frags)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_lines), warning_lines)
self.assertIn('Invalid response %s' % bad_status,
warning_lines[0])
# a non-404 in handoff responses will prevent quarantine; non-404
# is part way through all handoffs so not all handoffs are used
# regardless of how big request_node_count is
non_404_handoff = 3
for request_node_count in (2, 3, 13):
# replicas - 1 - num_frags other_responses from primaries,
# plus a batch of replicas - 1 during which non-404 shows up,
# plus some that trickle out before the non-404 shows up, but
# limited to (request_node_count * replicas - num_frags - 1)
# e.g. for 10+4 policy with request_node_count > 2
# - batch of 13 requests go to primaries,
# - 12 other_responses are consumed,
# - then a batch of 13 handoff requests is sent,
# - the non-404 is the 4th response in that batch,
# - so 3 more requests will have been trickled out
batch_size = ring.replicas - 1
num_other_resps = min(
2 * batch_size - num_frags + non_404_handoff,
request_node_count * ring.replicas - 1 - num_frags)
other_responses = [(404, None, None)] * (num_other_resps - 1)
other_responses.insert(
batch_size - num_frags + non_404_handoff,
(bad_status, None, None))
exc = self._do_test_reconstruct_insufficient_frags(
{'quarantine_threshold': 1, 'reclaim_age': 0,
'request_node_count': '%s * replicas'
% request_node_count},
num_frags, other_responses)
self.assertIsInstance(exc, DiskFileError)
self._assert_diskfile_not_quarantined()
self._verify_error_lines(num_frags, other_responses, num_frags)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_lines), warning_lines)
self.assertIn('Invalid response %s' % bad_status,
warning_lines[0])
def test_reconstruct_fa_no_quarantine_frag_not_old_enough(self):
# verify that solitary fragment is not quarantined if it has not
# reached reclaim_age
num_other_resps = self.policy.object_ring.replicas - 2
other_responses = [(404, None, None)] * num_other_resps
exc = self._do_test_reconstruct_insufficient_frags(
{'quarantine_threshold': 1, 'reclaim_age': 10000},
1, other_responses)
self.assertIsInstance(exc, DiskFileError)
self._assert_diskfile_not_quarantined()
self._verify_error_lines(1, other_responses, 1)
exc = self._do_test_reconstruct_insufficient_frags(
{'quarantine_threshold': 1,
'quarantine_age': 10000, # quarantine_age trumps reclaim_age
'reclaim_age': 0},
1, other_responses)
self.assertIsInstance(exc, DiskFileError)
self._assert_diskfile_not_quarantined()
self._verify_error_lines(1, other_responses, 1)
exc = self._do_test_reconstruct_insufficient_frags(
{'quarantine_threshold': 1}, # default reclaim_age
1, other_responses)
self.assertIsInstance(exc, DiskFileError)
self._assert_diskfile_not_quarantined()
self._verify_error_lines(1, other_responses, 1)
def test_reconstruct_fa_no_quarantine_frag_resp_different_timestamp(self):
# verify that solitary fragment is not quarantined if the only frag
# response is for a different timestamp than the local frag
resp_timestamp = utils.Timestamp(float(self.obj_timestamp) + 1)
num_other_resps = self.policy.object_ring.replicas - 2
other_responses = [(404, None, None)] * num_other_resps
resp_timestamps = [resp_timestamp] * (num_other_resps + 1)
exc = self._do_test_reconstruct_insufficient_frags(
{'quarantine_threshold': 1, 'reclaim_age': 0},
1, other_responses, resp_timestamps=resp_timestamps)
self.assertIsInstance(exc, DiskFileError)
self._assert_diskfile_not_quarantined()
self._verify_error_lines(1, other_responses, 1)
def test_reconstruct_fa_no_quarantine_frag_resp_mixed_timestamps(self):
# verify that solitary fragment is not quarantined if there is a
# response for a frag at different timestamp in addition to the
# response for the solitary local frag
resp_timestamp = utils.Timestamp(float(self.obj_timestamp) + 1)
num_other_resps = self.policy.object_ring.replicas - 3
other_responses = [(404, None, None)] * num_other_resps
resp_timestamps = ([self.obj_timestamp] +
[resp_timestamp] * (num_other_resps + 1))
exc = self._do_test_reconstruct_insufficient_frags(
{'quarantine_threshold': 1, 'reclaim_age': 0},
2, other_responses, resp_timestamps=resp_timestamps)
self.assertIsInstance(exc, DiskFileError)
self._assert_diskfile_not_quarantined()
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(3, len(error_lines), error_lines)
self.assertIn(
'Unable to get enough responses (1/%d from 1 ok responses)'
% (self.policy.ec_ndata,), error_lines[0])
self.assertIn(
'Unable to get enough responses (1/%d from 1 ok responses)'
% (self.policy.ec_ndata,), error_lines[1])
self.assertIn(
'Unable to get enough responses (%d x 404 error responses)'
% num_other_resps, error_lines[2])
def test_reconstruct_fa_no_quarantine_frag_resp_mixed_etags(self):
# verify that solitary fragment is not quarantined if there is a
# response for a frag with different etag in addition to the
# response for the solitary local frag
etags = [None, 'unexpected_etag']
num_other_resps = self.policy.object_ring.replicas - 3
other_responses = [(404, None, None)] * num_other_resps
exc = self._do_test_reconstruct_insufficient_frags(
{'quarantine_threshold': 1, 'reclaim_age': 0},
2, other_responses, resp_etags=etags)
self.assertIsInstance(exc, DiskFileError)
self._assert_diskfile_not_quarantined()
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(3, len(error_lines), error_lines)
self.assertIn(
'Mixed Etag', error_lines[0])
self.assertIn(
'Unable to get enough responses (1/%d from 2 ok responses)'
% (self.policy.ec_ndata,), error_lines[1])
self.assertIn(
'Unable to get enough responses (%d x 404 error responses)'
% num_other_resps, error_lines[2])
def _do_test_reconstruct_fa_no_quarantine_bad_headers(self, bad_headers):
# verify that responses with invalid headers count against the
# quarantine_threshold
self._configure_reconstructor(reclaim_age=0, quarantine_threshold=1)
local_frag_index = 2
self._create_fragment(local_frag_index)
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[0]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
def make_header(body):
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
return headers
responses = []
body = ec_archive_bodies[2]
headers = make_header(body)
responses.append((200, body, headers))
body = ec_archive_bodies[3]
headers = make_header(body)
headers.update(bad_headers)
responses.append((200, body, headers))
other_responses = ([(404, None, None)] *
(self.policy.object_ring.replicas - 3))
codes, body_iter, headers = zip(*(responses + other_responses))
resp_timestamps = [self.obj_timestamp] * len(codes)
resp_timestamps = [ts.internal for ts in resp_timestamps]
with mocked_http_conn(*codes, body_iter=body_iter,
headers=headers,
timestamps=resp_timestamps):
with self.assertRaises(DiskFileError) as cm:
self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
self.assertIsInstance(cm.exception, DiskFileError)
self._assert_diskfile_not_quarantined()
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(2, len(error_lines), error_lines)
self.assertIn(
'Unable to get enough responses (1/%d from 1 ok responses)'
% (self.policy.ec_ndata,), error_lines[0])
self.assertIn(
'Unable to get enough responses '
'(1 x unknown, %d x 404 error responses)'
% len(other_responses), error_lines[1])
def test_reconstruct_fa_no_quarantine_invalid_frag_index_header(self):
self._do_test_reconstruct_fa_no_quarantine_bad_headers(
{'X-Object-Sysmeta-Ec-Frag-Index': 'two'})
def test_reconstruct_fa_no_quarantine_missing_frag_index_header(self):
self._do_test_reconstruct_fa_no_quarantine_bad_headers(
{'X-Object-Sysmeta-Ec-Frag-Index': ''})
def test_reconstruct_fa_no_quarantine_missing_timestamp_header(self):
self._do_test_reconstruct_fa_no_quarantine_bad_headers(
{'X-Backend-Data-Timestamp': ''})
def test_reconstruct_fa_no_quarantine_missing_etag_header(self):
self._do_test_reconstruct_fa_no_quarantine_bad_headers(
{'X-Object-Sysmeta-Ec-Etag': ''})
def test_reconstruct_fa_frags_on_handoffs(self):
# just a lonely old frag on primaries: this appears to be a quarantine
# candidate, but unexpectedly the other frags are found on handoffs so
# expect rebuild
        # set reclaim_age to 0 to make lonely frag old enough for quarantine
self._configure_reconstructor(quarantine_threshold=1, reclaim_age=0)
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
# arrange for just one 200 to come from a primary, then 404s, then 200s
# from handoffs
responses = list()
for i, body in enumerate(ec_archive_bodies):
if i == 1:
# skip: this is the frag index we're rebuilding; insert 404s
responses.extend(
((404, None, None),) * self.policy.object_ring.replicas)
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
responses.append((200, body, headers))
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(
*codes, body_iter=body_iter, headers=headers,
timestamps=[self.obj_timestamp.internal] * len(codes)):
df = self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(0, body=b''))
self.assertEqual(0, df.content_length)
fixed_body = b''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
        # no errors or warnings
self.assertFalse(self.logger.get_lines_for_level('error'))
self.assertFalse(self.logger.get_lines_for_level('warning'))
debug_lines = self.logger.get_lines_for_level('debug')
self.assertIn('Reconstructing frag from handoffs, node_count=%d'
% (self.policy.object_ring.replicas * 2), debug_lines)
def test_reconstruct_fa_finds_duplicate_does_not_fail(self):
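        # duplicate frag indexes in the responses should not prevent a
        # successful rebuild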
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
# add some duplicates
num_duplicates = self.policy.ec_nparity - 1
ec_archive_bodies = (ec_archive_bodies[:num_duplicates] +
ec_archive_bodies)[:-num_duplicates]
def make_header(body):
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
return headers
responses = [(200, body, make_header(body))
for body in ec_archive_bodies]
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
fixed_body = b''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(
md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
        # no errors or warnings
self.assertFalse(self.logger.get_lines_for_level('error'))
self.assertFalse(self.logger.get_lines_for_level('warning'))
debug_log_lines = self.logger.get_lines_for_level('debug')
self.assertEqual(1, len(debug_log_lines))
expected_prefix = 'Reconstruct frag #1 with frag indexes'
self.assertIn(expected_prefix, debug_log_lines[0])
got_frag_index_list = json.loads(
debug_log_lines[0][len(expected_prefix):])
self.assertNotIn(1, got_frag_index_list)
def test_reconstruct_fa_missing_headers(self):
        # This is a set of negative tests asserting what happens when
        # expected headers are missing from the responses used to gather
        # fragments for reconstruction
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
def make_header(body):
headers = get_header_frag_index(self, body)
headers.update(
{'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': self.obj_timestamp.internal})
return headers
def test_missing_header(missing_header, warning_extra):
self.logger._clear()
responses = [(200, body, make_header(body))
for body in ec_archive_bodies]
            # To drop the header from responses[0], explicitly set the value
            # to None rather than deleting the key, because if the key does
            # not exist in the dict, fake_http_connect will insert some
            # key/value pairs automatically (e.g. X-Backend-Timestamp)
responses[0][2].update({missing_header: None})
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(
*codes, body_iter=body_iter, headers=headers) as mock_conn:
df = self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
fixed_body = b''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(
md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
# no errors
self.assertFalse(self.logger.get_lines_for_level('error'))
# ...but warning for the missing header
warning_log_lines = self.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_log_lines))
path = unquote(
'%(ip)s:%(port)d%(path)s' % mock_conn.requests[0]
)
expected_warning = 'Invalid resp from %s policy#0%s' % (
path, warning_extra)
if six.PY2:
expected_warning = expected_warning.decode('utf8')
self.assertIn(expected_warning, warning_log_lines)
test_missing_header(
'X-Object-Sysmeta-Ec-Frag-Index',
' (invalid X-Object-Sysmeta-Ec-Frag-Index: None)')
test_missing_header(
'X-Object-Sysmeta-Ec-Etag',
', frag index 0 (missing Etag)')
test_missing_header(
'X-Backend-Timestamp',
', frag index 0 (missing X-Backend-Data-Timestamp and '
'X-Backend-Timestamp)')
def test_reconstruct_fa_invalid_frag_index_headers(self):
        # This is a set of negative tests asserting what happens when the
        # expected EC frag index header carries an invalid value in the
        # responses used to gather fragments for reconstruction
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
def make_header(body):
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
return headers
def test_invalid_ec_frag_index_header(invalid_frag_index):
self.logger._clear()
responses = [(200, body, make_header(body))
for body in ec_archive_bodies]
responses[0][2].update({
'X-Object-Sysmeta-Ec-Frag-Index': invalid_frag_index})
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(
*codes, body_iter=body_iter, headers=headers) as mock_conn:
df = self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
fixed_body = b''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(
md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
# no errors
self.assertFalse(self.logger.get_lines_for_level('error'))
# ...but warning for the invalid header
warning_log_lines = self.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_log_lines))
path = unquote(
'%(ip)s:%(port)d%(path)s' % mock_conn.requests[0]
)
expected_warning = (
'Invalid resp from %s policy#0 '
'(invalid X-Object-Sysmeta-Ec-Frag-Index: %r)'
% (path, invalid_frag_index))
if six.PY2:
expected_warning = expected_warning.decode('utf8')
self.assertIn(expected_warning, warning_log_lines)
for value in ('None', 'invalid'):
test_invalid_ec_frag_index_header(value)
@patch_policies(with_ec_default=True)
class TestReconstructFragmentArchiveUTF8(TestReconstructFragmentArchive):
# repeat superclass tests with an object path that contains non-ascii chars
obj_name = b'o\xc3\xa8'
@patch_policies([ECStoragePolicy(0, name='ec', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
ec_segment_size=4096,
ec_duplication_factor=2),
StoragePolicy(1, name='other')],
fake_ring_args=[{'replicas': 28}, {'replicas': 3}])
class TestReconstructFragmentArchiveECDuplicationFactor(
TestReconstructFragmentArchive):
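    # repeat the superclass tests with ec_duplication_factor=2, i.e. 14
    # unique frag indexes spread over 28 fragment copies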
def test_reconstruct_fa_no_quarantine_duplicate_frags(self):
# verify that quarantine does not happen if the only other response in
# addition to the lonely frag's own response is for the same
# (duplicate) frag index
self._configure_reconstructor(quarantine_threshold=1, reclaim_age=0)
local_frag_index = 2
self._create_fragment(local_frag_index)
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[0]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
frags = [
ec_archive_bodies[local_frag_index],
ec_archive_bodies[local_frag_index +
self.policy.ec_n_unique_fragments]]
def make_header(body):
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
return headers
responses = [(200, frag, make_header(frag)) for frag in frags]
other_responses = ([(404, None, None)] *
(self.policy.ec_n_unique_fragments * 2 - 3))
codes, body_iter, headers = zip(*(responses + other_responses))
resp_timestamps = [self.obj_timestamp.internal] * len(codes)
with mocked_http_conn(*codes, body_iter=body_iter,
headers=headers,
timestamps=resp_timestamps):
with self.assertRaises(DiskFileError) as cm:
self.reconstructor.reconstruct_fa(
job, node, self._create_fragment(2))
self.assertIsInstance(cm.exception, DiskFileError)
self._assert_diskfile_not_quarantined()
self._verify_error_lines(2, other_responses, 1)
@patch_policies([ECStoragePolicy(0, name='ec', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
ec_segment_size=4096,
ec_duplication_factor=2),
StoragePolicy(1, name='other')],
fake_ring_args=[{'replicas': 28}, {'replicas': 3}])
class TestObjectReconstructorECDuplicationFactor(TestObjectReconstructor):
def setUp(self):
super(TestObjectReconstructorECDuplicationFactor, self).setUp()
self.fabricated_ring = FabricatedRing(replicas=28, devices=56)
def _test_reconstruct_with_duplicate_frags_no_errors(self, index):
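        # rebuild the frag at the given index; enough primaries time out
        # that the reconstructor must use responses from the duplicate
        # frag indexes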
utils.mkdirs(os.path.join(self.devices, 'sda1'))
df_mgr = self.reconstructor._df_router[self.policy]
df = df_mgr.get_diskfile('sda1', 9, 'a', 'c', 'o',
policy=self.policy)
write_diskfile(df, self.ts(), data=b'', frag_index=2)
df.open()
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[index]
node['backend_index'] = self.policy.get_backend_index(node['index'])
test_data = (b'rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = encode_frag_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(index)
responses = list()
for body in ec_archive_bodies:
headers = get_header_frag_index(self, body)
headers.update({'X-Object-Sysmeta-Ec-Etag': etag})
responses.append((200, body, headers))
# make a hook point at
# swift.obj.reconstructor.ObjectReconstructor._get_response
called_headers = []
orig_func = object_reconstructor.ObjectReconstructor._get_response
def _get_response_hook(self, node, policy, part, path, headers):
called_headers.append(headers)
return orig_func(self, node, policy, part, path, headers)
# need parity + 1 node failures to reach duplicated fragments
failed_start_at = (
self.policy.ec_n_unique_fragments - self.policy.ec_nparity - 1)
# set Timeout for node #9, #10, #11, #12, #13
for i in range(self.policy.ec_nparity + 1):
responses[failed_start_at + i] = (Timeout(), '', '')
codes, body_iter, headers = zip(*responses)
get_response_path = \
'swift.obj.reconstructor.ObjectReconstructor._get_response'
with mock.patch(get_response_path, _get_response_hook):
with mocked_http_conn(
*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, df)
fixed_body = b''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(
md5(fixed_body, usedforsecurity=False).hexdigest(),
md5(broken_body, usedforsecurity=False).hexdigest())
for called_header in called_headers:
called_header = HeaderKeyDict(called_header)
self.assertIn('Content-Length', called_header)
self.assertEqual(called_header['Content-Length'], '0')
self.assertIn('User-Agent', called_header)
user_agent = called_header['User-Agent']
self.assertTrue(user_agent.startswith('obj-reconstructor'))
def test_reconstruct_with_duplicate_frags_no_errors(self):
        # any of the fragments can be broken
for index in range(28):
self._test_reconstruct_with_duplicate_frags_no_errors(index)
def test_iter_nodes_for_frag(self):
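        # _iter_nodes_for_frag should yield the given primary first and
        # then only nodes whose backend (frag) index matches that primary's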
self.reconstructor.rebuild_handoff_node_count = -1
policy = ECStoragePolicy(1, name='test', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=3,
ec_duplication_factor=2)
policy.object_ring = FabricatedRing(replicas=14, devices=42)
primaries = policy.object_ring.get_part_nodes(0)
node = primaries[0]
nodes_for_frag = list(self.reconstructor._iter_nodes_for_frag(
policy, 0, node))
expected = [0, 0, 7, 14, 21]
self.assertEqual(expected, [n.get('index', n.get('handoff_index'))
for n in nodes_for_frag])
for node in nodes_for_frag:
self.assertEqual(0, node['backend_index'])
node = primaries[3]
nodes_for_frag = list(self.reconstructor._iter_nodes_for_frag(
policy, 0, node))
expected = [3, 3, 10, 17, 24]
self.assertEqual(expected, [n.get('index', n.get('handoff_index'))
for n in nodes_for_frag])
for node in nodes_for_frag:
self.assertEqual(3, node['backend_index'])
node = primaries[7]
nodes_for_frag = list(self.reconstructor._iter_nodes_for_frag(
policy, 0, node))
expected = [7, 0, 7, 14, 21]
self.assertEqual(expected, [n.get('index', n.get('handoff_index'))
for n in nodes_for_frag])
for node in nodes_for_frag:
self.assertEqual(0, node['backend_index'])
node = primaries[-1]
nodes_for_frag = list(self.reconstructor._iter_nodes_for_frag(
policy, 0, node))
expected = [13, 6, 13, 20, 27]
self.assertEqual(expected, [n.get('index', n.get('handoff_index'))
for n in nodes_for_frag])
for node in nodes_for_frag:
self.assertEqual(6, node['backend_index'])
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/obj/test_reconstructor.py |
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test.unit.proxy import test_server
from swift.obj import mem_server
def setUpModule():
test_server.do_setup(mem_server)
def tearDownModule():
test_server.tearDownModule()
class TestController(test_server.TestController):
pass
class TestProxyServer(test_server.TestProxyServer):
pass
class TestReplicatedObjectController(
test_server.TestReplicatedObjectController):
def test_PUT_no_etag_fallocate(self):
# mem server doesn't call fallocate(), believe it or not
pass
# these tests all go looking in the filesystem
def test_policy_IO(self):
pass
def test_GET_short_read(self):
pass
def test_GET_short_read_resuming(self):
pass
class TestECObjectController(test_server.TestECObjectController):
def test_PUT_ec(self):
pass
def test_PUT_ec_multiple_segments(self):
pass
def test_PUT_ec_fragment_archive_etag_mismatch(self):
pass
def test_reload_ring_ec(self):
pass
class TestContainerController(test_server.TestContainerController):
pass
class TestAccountController(test_server.TestAccountController):
pass
class TestAccountControllerFakeGetResponse(
test_server.TestAccountControllerFakeGetResponse):
pass
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/proxy/test_mem_server.py |
swift-master | test/unit/proxy/__init__.py |
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import email.parser
import logging
import json
import math
import os
import posix
import socket
import errno
import sys
import traceback
import unittest
from contextlib import contextmanager
from shutil import rmtree, copyfile, move
import gc
import time
from textwrap import dedent
import collections
from pyeclib.ec_iface import ECDriverError
from tempfile import mkdtemp, NamedTemporaryFile
import weakref
import operator
import functools
from swift.obj import diskfile
import re
import random
from collections import defaultdict
import uuid
import mock
from eventlet import sleep, spawn, wsgi, Timeout, debug
from eventlet.green import httplib
from io import BytesIO
import six
from six.moves import range
from six.moves.urllib.parse import quote, parse_qsl
from test import listen_zero
from test.debug_logger import debug_logger
from test.unit import (
connect_tcp, readuntil2crlfs, fake_http_connect, FakeRing,
FakeMemcache, patch_policies, write_fake_ring, mocked_http_conn,
DEFAULT_TEST_EC_TYPE, make_timestamp_iter, skip_if_no_xattrs,
FakeHTTPResponse)
from test.unit.helpers import setup_servers, teardown_servers
from swift.proxy import server as proxy_server
from swift.proxy.controllers.obj import ReplicatedObjectController
from swift.obj import server as object_server
from swift.common.bufferedhttp import BufferedHTTPResponse
from swift.common.middleware import proxy_logging, versioned_writes, \
copy, listing_formats
from swift.common.middleware.acl import parse_acl, format_acl
from swift.common.exceptions import ChunkReadTimeout, DiskFileNotExist, \
APIVersionError, ChunkReadError
from swift.common import utils, constraints, registry
from swift.common.utils import hash_path, storage_directory, \
parse_content_type, parse_mime_headers, StatsdClient, \
iter_multipart_mime_documents, public, mkdirs, NullLogger, md5, \
node_to_string, NamespaceBoundList
from swift.common.wsgi import loadapp, ConfigString
from swift.common.http_protocol import SwiftHttpProtocol
from swift.proxy.controllers import base as proxy_base
from swift.proxy.controllers.base import get_cache_key, cors_validation, \
get_account_info, get_container_info
import swift.proxy.controllers
import swift.proxy.controllers.obj
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import Request, Response, HTTPUnauthorized, \
HTTPException, HTTPBadRequest, wsgi_to_str
from swift.common.storage_policy import StoragePolicy, POLICIES
import swift.common.request_helpers
from swift.common.request_helpers import get_sys_meta_prefix, get_reserved_name
# mocks
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
STATIC_TIME = time.time()
_test_context = _test_servers = _test_sockets = _testdir = \
_test_POLICIES = None
def do_setup(object_server):
# setup test context and break out some globals for convenience
global _test_context, _testdir, _test_servers, _test_sockets, \
_test_POLICIES
_test_context = setup_servers(object_server)
_testdir = _test_context["testdir"]
_test_servers = _test_context["test_servers"]
_test_sockets = _test_context["test_sockets"]
_test_POLICIES = _test_context["test_POLICIES"]
def unpatch_policies(f):
"""
This will unset a TestCase level patch_policies to use the module level
policies setup for the _test_servers instead.
N.B. You should NEVER modify the _test_server policies or rings during a
test because they persist for the life of the entire module!
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
with patch_policies(_test_POLICIES):
return f(*args, **kwargs)
return wrapper
def setUpModule():
do_setup(object_server)
def tearDownModule():
teardown_servers(_test_context)
def sortHeaderNames(headerNames):
"""
Return the given string of header names sorted.
    headerNames: a comma-delimited list of header names
"""
headers = [a.strip() for a in headerNames.split(',') if a.strip()]
headers.sort()
return ', '.join(headers)
def parse_headers_string(headers_str):
headers_dict = HeaderKeyDict()
for line in headers_str.split(b'\r\n'):
if b': ' in line:
header, value = line.split(b': ', 1)
if six.PY2:
headers_dict[header] = value
else:
headers_dict[header.decode('utf8')] = value.decode('utf8')
return headers_dict
def get_node_error_stats(proxy_app, ring_node):
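    # Reach into the proxy's error limiter to get the error stats dict for
    # a ring node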
node_key = proxy_app.error_limiter.node_key(ring_node)
return proxy_app.error_limiter.stats.get(node_key) or {}
def node_error_count(proxy_app, ring_node):
# Reach into the proxy's internals to get the error count for a
# particular node
return get_node_error_stats(proxy_app, ring_node).get('errors', 0)
def node_last_error(proxy_app, ring_node):
# Reach into the proxy's internals to get the last error for a
# particular node
return get_node_error_stats(proxy_app, ring_node).get('last_error')
def set_node_errors(proxy_app, ring_node, value, last_error):
# Set the node's error count to value
node_key = proxy_app.error_limiter.node_key(ring_node)
stats = {'errors': value,
'last_error': last_error}
proxy_app.error_limiter.stats[node_key] = stats
@contextmanager
def save_globals():
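    # Context manager that saves the http_connect and
    # account_info/container_info references the tests monkey-patch, and
    # restores them on exit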
orig_http_connect = getattr(swift.proxy.controllers.base, 'http_connect',
None)
orig_account_info = getattr(swift.proxy.controllers.Controller,
'account_info', None)
orig_container_info = getattr(swift.proxy.controllers.Controller,
'container_info', None)
try:
yield True
finally:
swift.proxy.controllers.Controller.account_info = orig_account_info
swift.proxy.controllers.base.http_connect = orig_http_connect
swift.proxy.controllers.obj.http_connect = orig_http_connect
swift.proxy.controllers.account.http_connect = orig_http_connect
swift.proxy.controllers.container.http_connect = orig_http_connect
swift.proxy.controllers.Controller.container_info = orig_container_info
def set_http_connect(*args, **kwargs):
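    # Patch every proxy controller module to use a fake_http_connect built
    # from the given args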
if kwargs.get('body') is not None and not isinstance(
kwargs['body'], bytes):
kwargs['body'] = kwargs['body'].encode('ascii')
new_connect = fake_http_connect(*args, **kwargs)
swift.proxy.controllers.base.http_connect = new_connect
swift.proxy.controllers.obj.http_connect = new_connect
swift.proxy.controllers.account.http_connect = new_connect
swift.proxy.controllers.container.http_connect = new_connect
return new_connect
def _make_callback_func(calls):
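    # Build a callback that appends each backend request's method, path and
    # headers to the given list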
def callback(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
context = {}
context['method'] = method
context['path'] = path
context['headers'] = headers or {}
calls.append(context)
return callback
def _limit_max_file_size(f):
"""
This will limit constraints.MAX_FILE_SIZE for the duration of the
wrapped function, based on whether MAX_FILE_SIZE exceeds the
sys.maxsize limit on the system running the tests.
This allows successful testing on 32 bit systems.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
test_max_file_size = constraints.MAX_FILE_SIZE
if constraints.MAX_FILE_SIZE >= sys.maxsize:
test_max_file_size = (2 ** 30 + 2)
with mock.patch.object(constraints, 'MAX_FILE_SIZE',
test_max_file_size):
return f(*args, **kwargs)
return wrapper
# tests
class TestController(unittest.TestCase):
def setUp(self):
skip_if_no_xattrs()
_test_servers[0].error_limiter.stats.clear() # clear out errors
self.account_ring = FakeRing()
self.container_ring = FakeRing()
self.memcache = FakeMemcache()
app = proxy_server.Application(None, logger=debug_logger(),
account_ring=self.account_ring,
container_ring=self.container_ring)
self.controller = swift.proxy.controllers.Controller(app)
class FakeReq(object):
def __init__(self, memcache):
self.url = "/foo/bar"
self.method = "METHOD"
self.environ = {"swift.cache": memcache}
def as_referer(self):
return self.method + ' ' + self.url
self.account = 'some_account'
self.container = 'some_container'
self.request = FakeReq(self.memcache)
self.read_acl = 'read_acl'
self.write_acl = 'write_acl'
def test_transfer_headers(self):
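        # an x-remove-* header blanks the corresponding base header, while
        # other base metadata headers overwrite the destination values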
src_headers = {'x-remove-base-meta-owner': 'x',
'x-base-meta-size': '151M',
'new-owner': 'Kun'}
dst_headers = {'x-base-meta-owner': 'Gareth',
'x-base-meta-size': '150M'}
self.controller.transfer_headers(src_headers, dst_headers)
expected_headers = {'x-base-meta-owner': '',
'x-base-meta-size': '151M'}
self.assertEqual(dst_headers, expected_headers)
def check_account_info_return(self, partition, nodes, is_none=False):
if is_none:
p, n = None, None
else:
p, n = self.account_ring.get_nodes(self.account)
self.assertEqual(p, partition)
self.assertEqual(n, nodes)
def test_account_info_container_count(self):
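        # the container count should come back as an int whether the
        # backend or the cache supplies it as an int or a string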
cache_key = get_cache_key(self.account)
with save_globals():
set_http_connect(200, count=123)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.assertEqual(count, 123)
with save_globals():
set_http_connect(200, count='123')
del self.request.environ['swift.infocache'][cache_key]
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.assertEqual(count, 123)
with save_globals():
account_info = {'status': 200, 'container_count': 1234}
self.memcache.set(cache_key, account_info)
del self.request.environ['swift.infocache'][cache_key]
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.assertEqual(count, 1234)
with save_globals():
account_info = {'status': 200, 'container_count': '1234'}
self.memcache.set(cache_key, account_info)
del self.request.environ['swift.infocache'][cache_key]
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.assertEqual(count, 1234)
def test_make_requests(self):
with save_globals():
set_http_connect(200)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
set_http_connect(201, raise_timeout_exc=True)
self.controller._make_request(
nodes, partition, 'POST', '/', {}, '', None,
self.controller.app.logger.thread_locals)
# tests if 200 is cached and used
def test_account_info_200(self):
with save_globals():
set_http_connect(200)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes)
self.assertEqual(count, 12345)
# Test the internal representation in memcache
# 'container_count' changed from int to str
cache_key = get_cache_key(self.account)
container_info = {'status': 200,
'account_really_exists': True,
'container_count': '12345',
'total_object_count': None,
'bytes': None,
'storage_policies': {str(p.idx): {
'container_count': 0,
'object_count': 0,
'bytes': 0} for p in POLICIES},
'meta': {},
'sysmeta': {}}
self.assertEqual(container_info,
self.memcache.get(cache_key))
set_http_connect()
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes)
self.assertEqual(count, 12345)
# tests if 404 is cached and used
def test_account_info_404(self):
with save_globals():
set_http_connect(404, 404, 404)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes, True)
self.assertIsNone(count)
# Test the internal representation in memcache
# 'container_count' changed from 0 to None
cache_key = get_cache_key(self.account)
account_info = {'status': 404,
'container_count': None, # internally keep None
'total_object_count': None,
'bytes': None,
'storage_policies': {str(p.idx): {
'container_count': 0,
'object_count': 0,
'bytes': 0} for p in POLICIES},
'meta': {},
'sysmeta': {}}
self.assertEqual(account_info,
self.memcache.get(cache_key))
set_http_connect()
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes, True)
self.assertIsNone(count)
# tests if some http status codes are not cached
def test_account_info_no_cache(self):
def test(*status_list):
set_http_connect(*status_list)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.assertEqual(len(self.memcache.keys()), 0)
self.check_account_info_return(partition, nodes, True)
self.assertIsNone(count)
with save_globals():
# We cache if we have two 404 responses - fail if only one
test(503, 503, 404)
test(504, 404, 503)
test(404, 507, 503)
test(503, 503, 503)
def test_account_info_no_account(self):
with save_globals():
set_http_connect(404, 404, 404)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes, is_none=True)
self.assertIsNone(count)
def check_container_info_return(self, ret, is_none=False):
if is_none:
partition, nodes, read_acl, write_acl = None, None, None, None
else:
partition, nodes = self.container_ring.get_nodes(self.account,
self.container)
read_acl, write_acl = self.read_acl, self.write_acl
self.assertEqual(partition, ret['partition'])
self.assertEqual(nodes, ret['nodes'])
self.assertEqual(read_acl, ret['read_acl'])
self.assertEqual(write_acl, ret['write_acl'])
def test_container_info_invalid_account(self):
def account_info(self, account, request, autocreate=False):
return None, None
with save_globals():
swift.proxy.controllers.Controller.account_info = account_info
ret = self.controller.container_info(self.account,
self.container,
self.request)
self.check_container_info_return(ret, True)
# tests if 200 is cached and used
def test_container_info_200(self):
with save_globals():
headers = {'x-container-read': self.read_acl,
'x-container-write': self.write_acl}
set_http_connect(200, # account_info is found
200, headers=headers) # container_info is found
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret)
cache_key = get_cache_key(self.account, self.container)
cache_value = self.memcache.get(cache_key)
self.assertIsInstance(cache_value, dict)
self.assertEqual(200, cache_value.get('status'))
set_http_connect()
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret)
# tests if 404 is cached and used
def test_container_info_404(self):
def account_info(self, account, request):
return True, True, 0
with save_globals():
set_http_connect(503, 204, # account_info found
504, 404, 404) # container_info 'NotFound'
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
cache_key = get_cache_key(self.account, self.container)
cache_value = self.memcache.get(cache_key)
self.assertIsInstance(cache_value, dict)
self.assertEqual(404, cache_value.get('status'))
set_http_connect()
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
set_http_connect(503, 404, 404) # account_info 'NotFound'
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
cache_key = get_cache_key(self.account, self.container)
cache_value = self.memcache.get(cache_key)
self.assertIsInstance(cache_value, dict)
self.assertEqual(404, cache_value.get('status'))
set_http_connect()
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
# tests if some http status codes are not cached
def test_container_info_no_cache(self):
def test(*status_list):
set_http_connect(*status_list)
ret = self.controller.container_info(
self.account, self.container, self.request)
self.assertEqual(len(self.memcache.keys()), 0)
self.check_container_info_return(ret, True)
with save_globals():
# We cache if we have two 404 responses - fail if only one
test(503, 503, 404)
test(504, 404, 503)
test(404, 507, 503)
test(503, 503, 503)
def test_get_account_info_returns_values_as_strings(self):
app = mock.MagicMock()
app._pipeline_request_logging_app = app._pipeline_final_app = app
app.account_existence_skip_cache = 0.0
memcache = mock.MagicMock()
memcache.get = mock.MagicMock()
memcache.get.return_value = {
u'foo': u'\u2603',
u'meta': {u'bar': u'\u2603'},
u'sysmeta': {u'baz': u'\u2603'}}
env = {'PATH_INFO': '/v1/a', 'swift.cache': memcache}
ai = get_account_info(env, app)
# Test info is returned as strings
self.assertEqual(ai.get('foo'), wsgi_to_str('\xe2\x98\x83'))
self.assertIsInstance(ai.get('foo'), str)
# Test info['meta'] is returned as strings
m = ai.get('meta', {})
self.assertEqual(m.get('bar'), wsgi_to_str('\xe2\x98\x83'))
self.assertIsInstance(m.get('bar'), str)
# Test info['sysmeta'] is returned as strings
m = ai.get('sysmeta', {})
self.assertEqual(m.get('baz'), wsgi_to_str('\xe2\x98\x83'))
self.assertIsInstance(m.get('baz'), str)
def test_get_container_info_returns_values_as_strings(self):
app = mock.MagicMock()
app._pipeline_request_logging_app = app._pipeline_final_app = app
app.container_existence_skip_cache = 0.0
memcache = mock.MagicMock()
memcache.get = mock.MagicMock()
memcache.get.return_value = {
u'foo': u'\u2603',
u'meta': {u'bar': u'\u2603'},
u'sysmeta': {u'baz': u'\u2603'},
u'cors': {u'expose_headers': u'\u2603'}}
env = {'PATH_INFO': '/v1/a/c', 'swift.cache': memcache}
ci = get_container_info(env, app)
# Test info is returned as strings
self.assertEqual(ci.get('foo'), wsgi_to_str('\xe2\x98\x83'))
self.assertIsInstance(ci.get('foo'), str)
# Test info['meta'] is returned as strings
m = ci.get('meta', {})
self.assertEqual(m.get('bar'), wsgi_to_str('\xe2\x98\x83'))
self.assertIsInstance(m.get('bar'), str)
# Test info['sysmeta'] is returned as strings
m = ci.get('sysmeta', {})
self.assertEqual(m.get('baz'), wsgi_to_str('\xe2\x98\x83'))
self.assertIsInstance(m.get('baz'), str)
# Test info['cors'] is returned as strings
m = ci.get('cors', {})
self.assertEqual(m.get('expose_headers'), wsgi_to_str('\xe2\x98\x83'))
self.assertIsInstance(m.get('expose_headers'), str)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestProxyServerConfiguration(unittest.TestCase):
def setUp(self):
self.logger = debug_logger('test-proxy-config')
def _make_app(self, conf):
        # helper function to instantiate a proxy server instance
        self.logger.clear()
return proxy_server.Application(conf,
container_ring=FakeRing(),
account_ring=FakeRing(),
logger=self.logger)
def test_auto_create_account(self):
app = self._make_app({})
self.assertEqual(app.auto_create_account_prefix, '.')
self.assertEqual(self.logger.get_lines_for_level('warning'), [])
app = self._make_app({'auto_create_account_prefix': '-'})
self.assertEqual(app.auto_create_account_prefix, '-')
self.assertEqual(self.logger.get_lines_for_level('warning'), [
'Option auto_create_account_prefix is deprecated. '
'Configure auto_create_account_prefix under the '
'swift-constraints section of swift.conf. This option '
'will be ignored in a future release.'
])
def test_node_timeout(self):
        # this config could later be extended to assert more config options
app = self._make_app({'node_timeout': '3.5',
'recoverable_node_timeout': '1.5'})
self.assertEqual(app.node_timeout, 3.5)
self.assertEqual(app.recoverable_node_timeout, 1.5)
def test_cors_options(self):
# check defaults
app = self._make_app({})
self.assertFalse(app.cors_allow_origin)
self.assertFalse(app.cors_expose_headers)
self.assertTrue(app.strict_cors_mode)
# check custom configs
app = self._make_app({
'cors_allow_origin': '',
'cors_expose_headers': '',
'strict_cors_mode': 'True'})
self.assertTrue(app.strict_cors_mode)
app = self._make_app({
'cors_allow_origin': ' http://X.com,http://Y.com ,, http://Z.com',
'cors_expose_headers': ' custom1,,, custom2,custom3,,',
'strict_cors_mode': 'False'})
self.assertEqual({'http://X.com', 'http://Y.com', 'http://Z.com'},
set(app.cors_allow_origin))
self.assertEqual({'custom1', 'custom2', 'custom3'},
set(app.cors_expose_headers))
self.assertFalse(app.strict_cors_mode)
def test_memcache_recheck_options(self):
# check default options
app = self._make_app({})
self.assertEqual(app.recheck_account_existence, 60)
self.assertEqual(app.recheck_container_existence, 60)
self.assertEqual(app.recheck_updating_shard_ranges, 3600)
self.assertEqual(app.recheck_listing_shard_ranges, 600)
# check custom options
app = self._make_app({'recheck_account_existence': '30',
'recheck_container_existence': '40',
'recheck_updating_shard_ranges': '1800',
'recheck_listing_shard_ranges': ' 900'})
self.assertEqual(app.recheck_account_existence, 30)
self.assertEqual(app.recheck_container_existence, 40)
self.assertEqual(app.recheck_updating_shard_ranges, 1800)
self.assertEqual(app.recheck_listing_shard_ranges, 900)
def test_memcache_skip_options(self):
# check default options
app = self._make_app({})
self.assertEqual(app.container_listing_shard_ranges_skip_cache, 0)
self.assertEqual(app.container_updating_shard_ranges_skip_cache, 0)
# check custom options
app = self._make_app({
'container_listing_shard_ranges_skip_cache_pct': '0.01',
'container_updating_shard_ranges_skip_cache_pct': '0.1'})
self.assertEqual(app.container_listing_shard_ranges_skip_cache, 0.0001)
self.assertEqual(app.container_updating_shard_ranges_skip_cache, 0.001)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestProxyServer(unittest.TestCase):
def test_get_object_ring(self):
baseapp = proxy_server.Application({},
container_ring=FakeRing(),
account_ring=FakeRing())
with patch_policies([
StoragePolicy(0, 'a', False, object_ring=123),
StoragePolicy(1, 'b', True, object_ring=456),
StoragePolicy(2, 'd', False, object_ring=789)
]):
# None means legacy so always use policy 0
ring = baseapp.get_object_ring(None)
self.assertEqual(ring, 123)
ring = baseapp.get_object_ring('')
self.assertEqual(ring, 123)
ring = baseapp.get_object_ring('0')
self.assertEqual(ring, 123)
ring = baseapp.get_object_ring('1')
self.assertEqual(ring, 456)
ring = baseapp.get_object_ring('2')
self.assertEqual(ring, 789)
# illegal values
self.assertRaises(ValueError, baseapp.get_object_ring, '99')
self.assertRaises(ValueError, baseapp.get_object_ring, 'asdf')
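    # an unhandled exception while routing a request should produce a 500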
def test_unhandled_exception(self):
class MyApp(proxy_server.Application):
def get_controller(self, path):
raise Exception('this shouldn\'t be caught')
app = MyApp(None, account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
app.update_request(req)
resp = app.handle_request(req)
self.assertEqual(resp.status_int, 500)
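    # Python-internal names such as __init__ are not valid request methods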
def test_internal_method_request(self):
baseapp = proxy_server.Application({},
container_ring=FakeRing(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a', environ={'REQUEST_METHOD': '__init__'}))
self.assertEqual(resp.status, '405 Method Not Allowed')
def test_inexistent_method_request(self):
baseapp = proxy_server.Application({},
container_ring=FakeRing(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a', environ={'REQUEST_METHOD': '!invalid'}))
self.assertEqual(resp.status, '405 Method Not Allowed')
def test_private_method_request(self):
baseapp = proxy_server.Application({},
container_ring=FakeRing(),
account_ring=FakeRing())
baseapp.logger = debug_logger()
resp = baseapp.handle_request(
Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'UPDATE'}))
self.assertEqual(resp.status, '405 Method Not Allowed')
# Note that UPDATE definitely *isn't* advertised
self.assertEqual(sorted(resp.headers['Allow'].split(', ')), [
'DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST', 'PUT'])
# But with appropriate (internal-only) overrides, you can still use it
resp = baseapp.handle_request(
Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'UPDATE'},
headers={'X-Backend-Allow-Private-Methods': 'True',
'X-Backend-Storage-Policy-Index': '0'}))
# Now we actually make the requests, but there aren't any nodes
self.assertEqual(resp.status, '503 Service Unavailable')
# Bad method with overrides advertises private methods
resp = baseapp.handle_request(
Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'BOGUS'},
headers={'X-Backend-Allow-Private-Methods': '1'}))
self.assertEqual(resp.status, '405 Method Not Allowed')
self.assertEqual(sorted(resp.headers['Allow'].split(', ')), [
'DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST', 'PUT', 'UPDATE'])
def test_internal_reserved_name_request(self):
# set account info
fake_cache = FakeMemcache()
fake_cache.store[get_cache_key('a')] = {'status': 200}
app = proxy_server.Application({},
container_ring=FakeRing(),
account_ring=FakeRing())
# build internal container request
container = get_reserved_name('c')
req = Request.blank('/v1/a/%s' % container,
{'swift.cache': fake_cache})
app.update_request(req)
# try client request to reserved name
resp = app.handle_request(req)
self.assertEqual(resp.status_int, 412)
self.assertEqual(resp.body, b'Invalid UTF8 or contains NULL')
# set backend header
req.headers['X-Backend-Allow-Reserved-Names'] = 'true'
with mocked_http_conn(200):
resp = app.handle_request(req)
self.assertEqual(resp.status_int, 200)
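    # the swift.authorize callback placed in the environ should be invoked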
def test_calls_authorize_allow(self):
called = [False]
def authorize(req):
called[0] = True
with save_globals():
set_http_connect(200)
app = proxy_server.Application(None,
account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/v1/a')
req.environ['swift.authorize'] = authorize
app.update_request(req)
app.handle_request(req)
self.assertTrue(called[0])
def test_calls_authorize_deny(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
app = proxy_server.Application(None,
account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/v1/a')
req.environ['swift.authorize'] = authorize
app.update_request(req)
app.handle_request(req)
self.assertTrue(called[0])
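    # negative Content-Length values are rejected with 400 Bad Request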
def test_negative_content_length(self):
swift_dir = mkdtemp()
try:
baseapp = proxy_server.Application({'swift_dir': swift_dir},
debug_logger(),
FakeRing(), FakeRing())
resp = baseapp.handle_request(
Request.blank('/', environ={'CONTENT_LENGTH': '-1'}))
self.assertEqual(resp.status, '400 Bad Request')
self.assertEqual(resp.body, b'Invalid Content-Length')
resp = baseapp.handle_request(
Request.blank('/', environ={'CONTENT_LENGTH': '-123'}))
self.assertEqual(resp.status, '400 Bad Request')
self.assertEqual(resp.body, b'Invalid Content-Length')
finally:
rmtree(swift_dir, ignore_errors=True)
def test_adds_transaction_id(self):
swift_dir = mkdtemp()
try:
logger = debug_logger()
baseapp = proxy_server.Application({'swift_dir': swift_dir},
logger,
container_ring=debug_logger(),
account_ring=FakeRing())
baseapp.handle_request(
Request.blank('/info',
environ={'HTTP_X_TRANS_ID_EXTRA': 'sardine',
'REQUEST_METHOD': 'GET'}))
# This is kind of a hokey way to get the transaction ID; it'd be
# better to examine response headers, but the catch_errors
# middleware is what sets the X-Trans-Id header, and we don't have
# that available here.
self.assertTrue(logger.txn_id.endswith('-sardine'))
finally:
rmtree(swift_dir, ignore_errors=True)
def test_adds_transaction_id_length_limit(self):
swift_dir = mkdtemp()
try:
logger = debug_logger()
baseapp = proxy_server.Application({'swift_dir': swift_dir},
logger,
container_ring=debug_logger(),
account_ring=FakeRing())
baseapp.handle_request(
Request.blank('/info',
environ={'HTTP_X_TRANS_ID_EXTRA': 'a' * 1000,
'REQUEST_METHOD': 'GET'}))
self.assertTrue(logger.txn_id.endswith(
'-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))
finally:
rmtree(swift_dir, ignore_errors=True)
def test_denied_host_header(self):
swift_dir = mkdtemp()
try:
baseapp = proxy_server.Application({'swift_dir': swift_dir,
'deny_host_headers':
'invalid_host.com'},
container_ring=debug_logger(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a/c/o',
environ={'HTTP_HOST': 'invalid_host.com'}))
self.assertEqual(resp.status, '403 Forbidden')
finally:
rmtree(swift_dir, ignore_errors=True)
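    # with sorting_method = timing, recorded node timings influence sort order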
def test_node_timing(self):
baseapp = proxy_server.Application({'sorting_method': 'timing'},
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertEqual(baseapp.node_timings, {})
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
baseapp.update_request(req)
resp = baseapp.handle_request(req)
self.assertEqual(resp.status_int, 503) # couldn't connect to anything
exp_timings = {}
self.assertEqual(baseapp.node_timings, exp_timings)
times = [time.time()]
exp_timings = {'127.0.0.1': (0.1, times[0] + baseapp.timing_expiry)}
with mock.patch('swift.proxy.server.time', lambda: times.pop(0)):
baseapp.set_node_timing({'ip': '127.0.0.1'}, 0.1)
self.assertEqual(baseapp.node_timings, exp_timings)
nodes = [{'ip': '127.0.0.1'}, {'ip': '127.0.0.2'}, {'ip': '127.0.0.3'}]
with mock.patch('swift.proxy.server.shuffle', lambda l: l):
res = baseapp.sort_nodes(nodes)
exp_sorting = [{'ip': '127.0.0.2'}, {'ip': '127.0.0.3'},
{'ip': '127.0.0.1'}]
self.assertEqual(res, exp_sorting)
def _do_sort_nodes(self, conf, policy_conf, nodes, policy,
node_timings=None):
# Note with shuffling mocked out, sort_nodes will by default return
# nodes in the order they are given
nodes = list(nodes)
conf = dict(conf, policy_config=policy_conf)
baseapp = proxy_server.Application(conf,
logger=debug_logger(),
container_ring=FakeRing(),
account_ring=FakeRing())
if node_timings:
for i, n in enumerate(nodes):
baseapp.set_node_timing(n, node_timings[i])
with mock.patch('swift.proxy.server.shuffle', lambda x: x):
app_sorted = baseapp.sort_nodes(nodes, policy)
self.assertFalse(baseapp.logger.get_lines_for_level('warning'))
return baseapp, app_sorted
def test_sort_nodes_default(self):
nodes = [{'region': 0, 'zone': 1, 'ip': '127.0.0.3'},
{'region': 1, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 2, 'zone': 2, 'ip': '127.0.0.2'}]
# sanity check - no affinity conf results in node order unchanged
app, actual = self._do_sort_nodes({}, {}, nodes, None)
self.assertEqual(nodes, actual)
def test_sort_nodes_by_affinity_proxy_server_config(self):
nodes = [{'region': 0, 'zone': 1, 'ip': '127.0.0.3'},
{'region': 1, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 2, 'zone': 2, 'ip': '127.0.0.2'}]
# proxy-server affinity conf is to prefer r2
conf = {'sorting_method': 'affinity', 'read_affinity': 'r2=1'}
app, actual = self._do_sort_nodes(conf, {}, nodes, None)
self.assertEqual([nodes[2], nodes[0], nodes[1]], actual)
app, actual = self._do_sort_nodes(conf, {}, nodes, POLICIES[0])
self.assertEqual([nodes[2], nodes[0], nodes[1]], actual)
# check that node timings are not collected if sorting_method != timing
self.assertFalse(app.sorts_by_timing) # sanity check
self.assertFalse(app.node_timings) # sanity check
# proxy-server affinity conf is to prefer region 1
conf = {'sorting_method': 'affinity', 'read_affinity': 'r1=1'}
app, actual = self._do_sort_nodes(conf, {}, nodes, None)
self.assertEqual([nodes[1], nodes[0], nodes[2]], actual)
app, actual = self._do_sort_nodes(conf, {}, nodes, POLICIES[0])
self.assertEqual([nodes[1], nodes[0], nodes[2]], actual)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing()),
StoragePolicy(1, 'one', False, object_ring=FakeRing())])
def test_sort_nodes_by_affinity_per_policy(self):
nodes = [{'region': 0, 'zone': 1, 'ip': '127.0.0.4'},
{'region': 1, 'zone': 0, 'ip': '127.0.0.3'},
{'region': 2, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 3, 'zone': 0, 'ip': '127.0.0.2'}]
conf = {'sorting_method': 'affinity', 'read_affinity': 'r3=1'}
per_policy = {'0': {'sorting_method': 'affinity',
'read_affinity': 'r1=1'},
'1': {'sorting_method': 'affinity',
'read_affinity': 'r2=1'}}
# policy 0 affinity prefers r1
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[0])
self.assertEqual([nodes[1], nodes[0], nodes[2], nodes[3]], actual)
# policy 1 affinity prefers r2
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[1])
self.assertEqual([nodes[2], nodes[0], nodes[1], nodes[3]], actual)
# default affinity prefers r3
app, actual = self._do_sort_nodes(conf, per_policy, nodes, None)
self.assertEqual([nodes[3], nodes[0], nodes[1], nodes[2]], actual)
def test_sort_nodes_by_affinity_per_policy_with_no_default(self):
# no proxy-server setting but policy 0 prefers r0
nodes = [{'region': 1, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 0, 'zone': 2, 'ip': '127.0.0.2'}]
conf = {}
per_policy = {'0': {'sorting_method': 'affinity',
'read_affinity': 'r0=0'}}
# policy 0 uses affinity sorting
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[0])
self.assertEqual([nodes[1], nodes[0]], actual)
# any other policy will use default sorting
app, actual = self._do_sort_nodes(conf, per_policy, nodes, None)
self.assertEqual(nodes, actual)
def test_sort_nodes_by_affinity_per_policy_inherits(self):
        # policy 0 has read_affinity set but no sorting_method override
nodes = [{'region': 1, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 0, 'zone': 2, 'ip': '127.0.0.2'}]
conf = {}
per_policy = {'0': {'read_affinity': 'r0=0'}}
# policy 0 uses the default sorting method instead of affinity sorting
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[0])
self.assertEqual(nodes, actual)
# but if proxy-server sorting_method is affinity then policy 0 inherits
conf = {'sorting_method': 'affinity'}
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[0])
self.assertEqual([nodes[1], nodes[0]], actual)
def test_sort_nodes_by_affinity_per_policy_overrides(self):
# default setting is to sort by timing but policy 0 uses read affinity
nodes = [{'region': 0, 'zone': 1, 'ip': '127.0.0.3'},
{'region': 1, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 2, 'zone': 2, 'ip': '127.0.0.2'}]
node_timings = [10, 1, 100]
conf = {'sorting_method': 'timing'}
per_policy = {'0': {'sorting_method': 'affinity',
'read_affinity': 'r1=1,r2=2'}}
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[0],
node_timings=node_timings)
self.assertEqual([nodes[1], nodes[2], nodes[0]], actual)
# check that timings are collected despite one policy using affinity
self.assertTrue(app.sorts_by_timing)
self.assertEqual(3, len(app.node_timings))
# check app defaults to sorting by timing when no policy specified
app, actual = self._do_sort_nodes(conf, per_policy, nodes, None,
node_timings=node_timings)
self.assertEqual([nodes[1], nodes[0], nodes[2]], actual)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing()),
StoragePolicy(1, 'one', False, object_ring=FakeRing())])
def test_sort_nodes_by_timing_per_policy(self):
# default setting is to sort by affinity but policy 0 uses timing
nodes = [{'region': 0, 'zone': 1, 'ip': '127.0.0.3'},
{'region': 1, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 2, 'zone': 2, 'ip': '127.0.0.2'}]
node_timings = [10, 1, 100]
conf = {'sorting_method': 'affinity', 'read_affinity': 'r1=1,r2=2'}
per_policy = {'0': {'sorting_method': 'timing',
'read_affinity': 'r1=1,r2=2'}, # should be ignored
'1': {'read_affinity': 'r2=1'}}
# policy 0 uses timing
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[0],
node_timings=node_timings)
self.assertEqual([nodes[1], nodes[0], nodes[2]], actual)
self.assertTrue(app.sorts_by_timing)
self.assertEqual(3, len(app.node_timings))
# policy 1 uses policy specific read affinity
app, actual = self._do_sort_nodes(conf, per_policy, nodes, POLICIES[1],
node_timings=node_timings)
self.assertEqual([nodes[2], nodes[0], nodes[1]], actual)
# check that with no policy specified the default read affinity is used
app, actual = self._do_sort_nodes(conf, per_policy, nodes, None,
node_timings=node_timings)
self.assertEqual([nodes[1], nodes[2], nodes[0]], actual)
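    # with concurrent_gets enabled, the fastest successful backend
    # response should be returned to the client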
def test_node_concurrency(self):
nodes = [{'region': 1, 'zone': 1, 'ip': '127.0.0.1', 'port': 6010,
'device': 'sda'},
{'region': 2, 'zone': 2, 'ip': '127.0.0.2', 'port': 6010,
'device': 'sda'},
{'region': 3, 'zone': 3, 'ip': '127.0.0.3', 'port': 6010,
'device': 'sda'}]
timings = {'127.0.0.1': 2, '127.0.0.2': 1, '127.0.0.3': 0}
statuses = {'127.0.0.1': 200, '127.0.0.2': 200, '127.0.0.3': 200}
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'GET'})
def fake_iter_nodes(*arg, **karg):
class FakeNodeIter(object):
num_primary_nodes = 3
def __iter__(self):
return iter(nodes)
return FakeNodeIter()
class FakeConn(object):
def __init__(self, ip, *args, **kargs):
self.ip = ip
self.args = args
self.kargs = kargs
def getresponse(self):
body = 'Response from %s' % self.ip
def mygetheader(header, *args, **kargs):
if header == "Content-Type":
return ""
elif header == "Content-Length":
return str(len(body))
else:
return "1"
resp = mock.Mock()
resp.read.side_effect = [body.encode('ascii'), b'']
resp.getheader = mygetheader
resp.getheaders.return_value = {}
resp.reason = ''
resp.status = statuses[self.ip]
sleep(timings[self.ip])
return resp
def myfake_http_connect_raw(ip, *args, **kargs):
conn = FakeConn(ip, *args, **kargs)
return conn
with mock.patch('swift.proxy.controllers.account.NodeIter',
fake_iter_nodes):
with mock.patch('swift.common.bufferedhttp.http_connect_raw',
myfake_http_connect_raw):
app_conf = {'concurrent_gets': 'on',
'concurrency_timeout': 0}
baseapp = proxy_server.Application(app_conf,
container_ring=FakeRing(),
account_ring=FakeRing())
policy_opts = baseapp.get_policy_options(None)
self.assertTrue(policy_opts.concurrent_gets)
self.assertEqual(policy_opts.concurrency_timeout, 0)
baseapp.update_request(req)
resp = baseapp.handle_request(req)
# Should get 127.0.0.3 as this has a wait of 0 seconds.
self.assertEqual(resp.body, b'Response from 127.0.0.3')
                # let's try again: 127.0.0.1 now has zero timing but
                # returns an error
timings['127.0.0.1'] = 0
statuses['127.0.0.1'] = 500
# Should still get 127.0.0.3 as this has a wait of 0 seconds
# and a success
baseapp.update_request(req)
resp = baseapp.handle_request(req)
self.assertEqual(resp.body, b'Response from 127.0.0.3')
                # Now let's set the concurrency_timeout
app_conf['concurrency_timeout'] = 2
baseapp = proxy_server.Application(app_conf,
container_ring=FakeRing(),
account_ring=FakeRing())
policy_opts = baseapp.get_policy_options(None)
self.assertEqual(policy_opts.concurrency_timeout, 2)
baseapp.update_request(req)
resp = baseapp.handle_request(req)
                # Should get 127.0.0.2 as this has a wait of 1 second.
self.assertEqual(resp.body, b'Response from 127.0.0.2')
def test_info_defaults(self):
app = proxy_server.Application({},
account_ring=FakeRing(),
container_ring=FakeRing())
self.assertTrue(app.expose_info)
self.assertIsInstance(app.disallowed_sections, list)
self.assertEqual(2, len(app.disallowed_sections))
self.assertEqual([
'swift.auto_create_account_prefix',
'swift.valid_api_versions',
], sorted(app.disallowed_sections))
self.assertIsNone(app.admin_key)
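    # /info requests should be routed to the InfoController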
def test_get_info_controller(self):
req = Request.blank('/info')
app = proxy_server.Application({},
account_ring=FakeRing(),
container_ring=FakeRing())
controller, path_parts = app.get_controller(req)
self.assertIn('version', path_parts)
self.assertIsNone(path_parts['version'])
self.assertIn('disallowed_sections', path_parts)
self.assertIn('expose_info', path_parts)
self.assertIn('admin_key', path_parts)
self.assertEqual(controller.__name__, 'InfoController')
def test_exception_occurred_replication_ip_port_logging(self):
logger = debug_logger('test')
app = proxy_server.Application(
{},
account_ring=FakeRing(separate_replication=True),
container_ring=FakeRing(separate_replication=True),
logger=logger)
app.sort_nodes = lambda nodes, policy: nodes
part = app.container_ring.get_part('a', 'c')
nodes = app.container_ring.get_part_nodes(part)
self.assertNotEqual(nodes[0]['ip'], nodes[0]['replication_ip'])
self.assertEqual(0, sum([node_error_count(app, node)
for node in nodes])) # sanity
# no use_replication header...
req = Request.blank('/v1/a/c')
with mocked_http_conn(200, 503, 200) as mocked_conn:
req.get_response(app)
expected = [(n['ip'], n['port']) for n in nodes[:2]]
actual = [(req['ip'], req['port']) for req in mocked_conn.requests[1:]]
self.assertEqual(expected, actual)
line = logger.get_lines_for_level('error')[-1]
self.assertIn('Container Server', line)
self.assertIn('%s:%s/%s' % (nodes[0]['ip'],
nodes[0]['port'],
nodes[0]['device']), line)
self.assertEqual(1, sum([node_error_count(app, node)
for node in nodes]))
annotated_nodes = [dict(node, use_replication=True) for node in nodes]
self.assertEqual(0, sum([node_error_count(app, node)
for node in annotated_nodes]))
logger.clear()
req = Request.blank(
'/v1/a/c',
headers={'x-backend-use-replication-network': True})
with mocked_http_conn(200, 503, 200):
req.get_response(app)
line = logger.get_lines_for_level('error')[-1]
self.assertIn('Container Server', line)
self.assertIn('%s:%s/%s' % (nodes[0]['replication_ip'],
nodes[0]['replication_port'],
nodes[0]['device']), line)
self.assertEqual(1, sum([node_error_count(app, node)
for node in nodes]))
annotated_nodes = [dict(node, use_replication=True) for node in nodes]
self.assertEqual(1, sum([node_error_count(app, node)
for node in annotated_nodes]))
def test_exception_occurred(self):
def do_test(additional_info):
logger = debug_logger('test')
suppression_limit = 10
app = proxy_server.Application(
{'error_suppression_limit': suppression_limit},
account_ring=FakeRing(),
container_ring=FakeRing(),
logger=logger)
node = app.container_ring.get_part_nodes(0)[0]
node_key = app.error_limiter.node_key(node)
self.assertNotIn(node_key, app.error_limiter.stats) # sanity
if six.PY2:
expected_info = additional_info.decode('utf8')
else:
expected_info = additional_info
incremented_limit_samples = []
for i in range(suppression_limit + 1):
try:
raise Exception('kaboom1!')
except Exception as err:
caught_exc = err
app.exception_occurred(
node, 'server-type', additional_info)
self.assertEqual(i + 1, node_error_count(app, node))
line = logger.get_lines_for_level('error')[i]
self.assertIn('server-type server', line)
self.assertIn(expected_info, line)
self.assertIn(node['ip'], line)
self.assertIn(str(node['port']), line)
self.assertIn(node['device'], line)
log_args, log_kwargs = logger.log_dict['error'][i]
self.assertTrue(log_kwargs['exc_info'])
self.assertIs(caught_exc, log_kwargs['exc_info'][1])
incremented_limit_samples.append(
logger.statsd_client.get_increment_counts().get(
'error_limiter.incremented_limit', 0))
self.assertEqual([0] * 10 + [1], incremented_limit_samples)
self.assertEqual(
('Node will be error limited for 60.00s: %s' %
node_to_string(node)),
logger.get_lines_for_level('error')[suppression_limit + 1])
do_test('success')
do_test('succès')
do_test(u'success')
def test_error_occurred(self):
def do_test(msg):
logger = debug_logger('test')
suppression_limit = 10
app = proxy_server.Application(
{'error_suppression_limit': suppression_limit},
account_ring=FakeRing(),
container_ring=FakeRing(),
logger=logger)
node = app.container_ring.get_part_nodes(0)[0]
node_key = app.error_limiter.node_key(node)
self.assertNotIn(node_key, app.error_limiter.stats) # sanity
if six.PY2:
expected_msg = msg.decode('utf8')
else:
expected_msg = msg
incremented_limit_samples = []
for i in range(suppression_limit + 1):
app.error_occurred(node, msg)
self.assertEqual(i + 1, node_error_count(app, node))
line = logger.get_lines_for_level('error')[i]
self.assertIn(expected_msg, line)
self.assertIn(node_to_string(node), line)
incremented_limit_samples.append(
logger.statsd_client.get_increment_counts().get(
'error_limiter.incremented_limit', 0))
self.assertEqual([0] * 10 + [1], incremented_limit_samples)
self.assertEqual(
('Node will be error limited for 60.00s: %s' %
node_to_string(node)),
logger.get_lines_for_level('error')[-1])
# error limiting is extended if another error occurs
app.error_occurred(node, msg)
self.assertEqual(suppression_limit + 2,
node_error_count(app, node))
line = logger.get_lines_for_level('error')[-2]
self.assertIn(expected_msg, line)
self.assertIn(node_to_string(node), line)
self.assertEqual(
2, logger.statsd_client.get_increment_counts().get(
'error_limiter.incremented_limit', 0))
self.assertEqual(
('Node will be error limited for 60.00s: %s' %
node_to_string(node)),
logger.get_lines_for_level('error')[-1])
do_test('success')
do_test('succès')
do_test(u'success')
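    # error_occurred and exception_occurred should both log errors and
    # increment the node's error count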
def test_error_limit_methods(self):
logger = debug_logger('test')
app = proxy_server.Application({},
account_ring=FakeRing(),
container_ring=FakeRing(),
logger=logger)
node = app.container_ring.get_part_nodes(0)[0]
# error occurred
app.error_occurred(node, 'test msg')
self.assertTrue('test msg' in
logger.get_lines_for_level('error')[-1])
self.assertEqual(1, node_error_count(app, node))
# exception occurred
expected_err = None
try:
raise Exception('kaboom1!')
except Exception as e1:
expected_err = e1
app.exception_occurred(node, 'test1', 'test1 msg')
line = logger.get_lines_for_level('error')[-1]
self.assertIn('test1 server', line)
self.assertIn('test1 msg', line)
log_args, log_kwargs = logger.log_dict['error'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertIs(log_kwargs['exc_info'][1], expected_err)
self.assertEqual(2, node_error_count(app, node))
# warning exception occurred
try:
raise Exception('kaboom2!')
except Exception as e2:
expected_err = e2
app.exception_occurred(node, 'test2', 'test2 msg',
level=logging.WARNING)
line = logger.get_lines_for_level('warning')[-1]
self.assertIn('test2 server', line)
self.assertIn('test2 msg', line)
log_args, log_kwargs = logger.log_dict['warning'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertIs(log_kwargs['exc_info'][1], expected_err)
self.assertEqual(3, node_error_count(app, node))
# custom exception occurred
try:
raise Exception('kaboom3!')
except Exception as e3:
expected_err = e3
e3_info = sys.exc_info()
try:
raise Exception('kaboom4!')
except Exception:
pass
app.exception_occurred(node, 'test3', 'test3 msg',
level=logging.WARNING, exc_info=e3_info)
line = logger.get_lines_for_level('warning')[-1]
self.assertIn('test3 server', line)
self.assertIn('test3 msg', line)
log_args, log_kwargs = logger.log_dict['warning'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertIs(log_kwargs['exc_info'][1], expected_err)
self.assertEqual(4, node_error_count(app, node))
def test_check_response_200(self):
app = proxy_server.Application({},
account_ring=FakeRing(),
container_ring=FakeRing(),
logger=debug_logger())
node = app.container_ring.get_part_nodes(1)[0]
resp = FakeHTTPResponse(Response())
ret = app.check_response(node, 'Container', resp, 'PUT', '/v1/a/c')
self.assertTrue(ret)
error_lines = app.logger.get_lines_for_level('error')
self.assertFalse(error_lines)
self.assertEqual(0, node_error_count(app, node))
def test_check_response_507(self):
app = proxy_server.Application({},
account_ring=FakeRing(),
container_ring=FakeRing(),
logger=debug_logger())
node = app.container_ring.get_part_nodes(1)[0]
resp = FakeHTTPResponse(Response(status=507))
ret = app.check_response(node, 'Container', resp, 'PUT', '/v1/a/c')
self.assertFalse(ret)
error_lines = app.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines))
self.assertEqual(
'Node will be error limited for 60.00s: 10.0.0.0:1000/sda, '
'error: ERROR Insufficient Storage', error_lines[0])
self.assertEqual(11, node_error_count(app, node))
self.assertTrue(app.error_limited(node))
app.logger.clear()
ret = app.check_response(node, 'Account', resp, 'PUT', '/v1/a/c',
body='full')
self.assertFalse(ret)
error_lines = app.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines))
self.assertEqual(
'Node will be error limited for 60.00s: 10.0.0.0:1000/sda, '
'error: ERROR Insufficient Storage', error_lines[0])
self.assertEqual(11, node_error_count(app, node))
self.assertTrue(app.error_limited(node))
def test_check_response_503(self):
app = proxy_server.Application({},
account_ring=FakeRing(),
container_ring=FakeRing(),
logger=debug_logger())
node = app.container_ring.get_part_nodes(1)[0]
resp = FakeHTTPResponse(Response(status=503))
app.logger.clear()
ret = app.check_response(node, 'Container', resp, 'PUT', '/v1/a/c')
self.assertFalse(ret)
error_lines = app.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines))
self.assertEqual('ERROR 503 Trying to PUT /v1/a/c From Container '
'Server 10.0.0.0:1000/sda', error_lines[0])
self.assertEqual(1, node_error_count(app, node))
self.assertFalse(app.error_limited(node))
app.logger.clear()
ret = app.check_response(node, 'Object', resp, 'GET', '/v1/a/c/o',
body='full')
self.assertFalse(ret)
error_lines = app.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines))
self.assertEqual('ERROR 503 full Trying to GET /v1/a/c/o From Object '
'Server 10.0.0.0:1000/sda', error_lines[0])
self.assertEqual(2, node_error_count(app, node))
self.assertFalse(app.error_limited(node))
def test_valid_api_version(self):
app = proxy_server.Application({},
account_ring=FakeRing(),
container_ring=FakeRing())
# The version string is only checked for account, container and object
# requests; the raised APIVersionError returns a 404 to the client
for path in [
'/v2/a',
'/v2/a/c',
'/v2/a/c/o']:
req = Request.blank(path)
self.assertRaises(APIVersionError, app.get_controller, req)
# Default valid API versions are ok
for path in [
'/v1/a',
'/v1/a/c',
'/v1/a/c/o',
'/v1.0/a',
'/v1.0/a/c',
'/v1.0/a/c/o']:
req = Request.blank(path)
controller, path_parts = app.get_controller(req)
self.assertIsNotNone(controller)
# Ensure settings valid API version constraint works
for version in ["42", 42]:
try:
with NamedTemporaryFile('w+t') as f:
f.write('[swift-constraints]\n')
f.write('valid_api_versions = %s\n' % version)
f.flush()
with mock.patch.object(utils, 'SWIFT_CONF_FILE', f.name):
constraints.reload_constraints()
req = Request.blank('/%s/a' % version)
controller, _ = app.get_controller(req)
self.assertIsNotNone(controller)
# In this case v1 is invalid
req = Request.blank('/v1/a')
self.assertRaises(APIVersionError, app.get_controller, req)
finally:
constraints.reload_constraints()
# Check that the valid_api_versions is not exposed by default
req = Request.blank('/info')
controller, path_parts = app.get_controller(req)
self.assertTrue('swift.valid_api_versions' in
path_parts.get('disallowed_sections'))
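    # the statsd metric prefix defaults to 'proxy-server.' and honors
    # log_statsd_metric_prefix when it is configured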
def test_statsd_prefix(self):
app = proxy_server.Application({'log_statsd_host': 'example.com'},
account_ring=FakeRing(),
container_ring=FakeRing())
self.assertIsNotNone(app.logger.logger.statsd_client)
self.assertIsInstance(app.logger.logger.statsd_client,
StatsdClient)
self.assertEqual('proxy-server.',
app.logger.logger.statsd_client._prefix)
app = proxy_server.Application({'log_statsd_metric_prefix': 'foo',
'log_name': 'bar',
'log_statsd_host': 'example.com'},
account_ring=FakeRing(),
container_ring=FakeRing())
self.assertIsNotNone(app.logger.logger.statsd_client)
self.assertIsInstance(app.logger.logger.statsd_client,
StatsdClient)
self.assertEqual('foo.proxy-server.',
app.logger.logger.statsd_client._prefix)
@patch_policies([
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
])
class TestProxyServerLoading(unittest.TestCase):
def setUp(self):
self._orig_hash_suffix = utils.HASH_PATH_SUFFIX
utils.HASH_PATH_SUFFIX = b'endcap'
self.tempdir = mkdtemp()
def tearDown(self):
rmtree(self.tempdir)
utils.HASH_PATH_SUFFIX = self._orig_hash_suffix
for policy in POLICIES:
policy.object_ring = None
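    # timeout options given as strings should be parsed into floats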
def test_float_timeouts(self):
conf = {
'node_timeout': '2.3',
'recoverable_node_timeout': '1.4',
'conn_timeout': '0.7',
'client_timeout': '1.7',
'post_quorum_timeout': '0.3',
'concurrency_timeout': '0.2',
}
for policy in POLICIES:
policy.object_ring = FakeRing()
app = proxy_server.Application(conf, debug_logger(),
FakeRing(), FakeRing())
self.assertEqual(app.node_timeout, 2.3)
self.assertEqual(app.recoverable_node_timeout, 1.4)
self.assertEqual(app.conn_timeout, 0.7)
self.assertEqual(app.client_timeout, 1.7)
self.assertEqual(app.post_quorum_timeout, 0.3)
self.assertEqual(app.get_policy_options(
None).concurrency_timeout, 0.2)
def test_concurrent_ec_options(self):
conf = {
'concurrent_gets': 'on',
'concurrency_timeout': '0.5',
'concurrent_ec_extra_requests': '4',
}
for policy in POLICIES:
policy.object_ring = FakeRing()
app = proxy_server.Application(conf, debug_logger(),
FakeRing(), FakeRing())
for policy in POLICIES:
policy_opts = app.get_policy_options(policy)
self.assertEqual(policy_opts.concurrent_ec_extra_requests, 4)
self.assertEqual(policy_opts.concurrent_gets, True)
self.assertEqual(policy_opts.concurrency_timeout, 0.5)
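    # loading the app should load object rings for every storage policy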
def test_load_policy_rings(self):
for policy in POLICIES:
self.assertFalse(policy.object_ring)
conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
conf_body = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
""" % self.tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
account_ring_path = os.path.join(self.tempdir, 'account.ring.gz')
write_fake_ring(account_ring_path)
container_ring_path = os.path.join(self.tempdir, 'container.ring.gz')
write_fake_ring(container_ring_path)
for policy in POLICIES:
object_ring_path = os.path.join(self.tempdir,
policy.ring_name + '.ring.gz')
write_fake_ring(object_ring_path)
app = loadapp(conf_path)._pipeline_final_app
# validate loaded rings
self.assertEqual(app.account_ring.serialized_path,
account_ring_path)
self.assertEqual(app.container_ring.serialized_path,
container_ring_path)
for policy in POLICIES:
self.assertEqual(policy.object_ring,
app.get_object_ring(int(policy)))
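    # loadapp should raise IOError until all required ring files exist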
def test_missing_rings(self):
conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
conf_body = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
""" % self.tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
ring_paths = [
os.path.join(self.tempdir, 'account.ring.gz'),
os.path.join(self.tempdir, 'container.ring.gz'),
]
for policy in POLICIES:
self.assertFalse(policy.object_ring)
object_ring_path = os.path.join(self.tempdir,
policy.ring_name + '.ring.gz')
ring_paths.append(object_ring_path)
for policy in POLICIES:
self.assertFalse(policy.object_ring)
for ring_path in ring_paths:
self.assertFalse(os.path.exists(ring_path))
self.assertRaises(IOError, loadapp, conf_path)
write_fake_ring(ring_path)
# all rings exist, app should load
loadapp(conf_path)
for policy in POLICIES:
self.assertTrue(policy.object_ring)
@patch_policies()
class TestProxyServerConfigLoading(unittest.TestCase):
def setUp(self):
skip_if_no_xattrs()
_test_servers[0].error_limiter.stats.clear() # clear out errors
self.tempdir = mkdtemp()
account_ring_path = os.path.join(self.tempdir, 'account.ring.gz')
write_fake_ring(account_ring_path)
container_ring_path = os.path.join(self.tempdir, 'container.ring.gz')
write_fake_ring(container_ring_path)
def tearDown(self):
rmtree(self.tempdir)
def _write_conf(self, conf_body):
# this is broken out to a method so that subclasses can override
conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
return conf_path
def _default_conf(self):
conf_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
'../../../',
'etc/proxy-server.conf-sample'
))
with open(conf_path) as f:
conf_body = f.read()
fixed_body = conf_body.replace('# swift_dir = /etc/swift',
'swift_dir = %s' % self.tempdir)
conf_path = self._write_conf(fixed_body)
return conf_path
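    # the sample proxy-server.conf should load and successfully serve /info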
def test_default_proxy_config(self):
app = loadapp(self._default_conf())
req = Request.blank('/info')
resp = req.get_response(app)
self.assertEqual(resp.status_int, 200)
def _write_conf_and_load_app(self, conf_sections, app_name='proxy-server'):
# write proxy-server.conf file, load app
conf_body = dedent("""
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = %s
%s
""") % (self.tempdir, app_name, dedent(conf_sections))
conf_path = self._write_conf(conf_body)
with mock.patch('swift.proxy.server.get_logger',
return_value=debug_logger()):
app = loadapp(conf_path, allow_modify_pipeline=False)
return app
def _check_policy_options(self, app, exp_options, exp_is_local):
# verify expected config
for policy, options in exp_options.items():
for k, v in options.items():
actual = getattr(app.get_policy_options(policy), k)
if k == "write_affinity_node_count_fn":
if policy: # this check only applies when using a policy
actual = actual(policy.object_ring.replica_count)
self.assertEqual(v, actual)
continue
self.assertEqual(v, actual,
"Expected %s=%s but got %s=%s for policy %s" %
(k, v, k, actual, policy))
for policy, nodes in exp_is_local.items():
fn = app.get_policy_options(policy).write_affinity_is_local_fn
if nodes is None:
self.assertIsNone(fn)
continue
for node, expected_result in nodes:
actual = fn(node)
self.assertIs(expected_result, actual,
"Expected %s but got %s for %s, policy %s" %
(expected_result, actual, node, policy))
def test_per_policy_conf_none_configured(self):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
"""
expected_default = {"read_affinity": "",
"sorting_method": "shuffle",
"write_affinity": "",
"write_affinity_node_count_fn": 6}
exp_options = {None: expected_default,
POLICIES[0]: expected_default,
POLICIES[1]: expected_default}
exp_is_local = {POLICIES[0]: None,
POLICIES[1]: None}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, exp_is_local)
def test_per_policy_conf_one_configured(self):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
[proxy-server:policy:0]
sorting_method = affinity
read_affinity = r1=100
write_affinity = r1
write_affinity_node_count = 1 * replicas
write_affinity_handoff_delete_count = 4
rebalance_missing_suppression_count = 2
"""
expected_default = {"read_affinity": "",
"sorting_method": "shuffle",
"write_affinity": "",
"write_affinity_node_count_fn": 6,
"write_affinity_handoff_delete_count": None,
"rebalance_missing_suppression_count": 1}
exp_options = {None: expected_default,
POLICIES[0]: {"read_affinity": "r1=100",
"sorting_method": "affinity",
"write_affinity": "r1",
"write_affinity_node_count_fn": 3,
"write_affinity_handoff_delete_count": 4,
"rebalance_missing_suppression_count": 2},
POLICIES[1]: expected_default}
exp_is_local = {POLICIES[0]: [({'region': 1, 'zone': 2}, True),
({'region': 2, 'zone': 1}, False)],
POLICIES[1]: None}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, exp_is_local)
default_options = app.get_policy_options(None)
self.assertEqual(
"ProxyOverrideOptions({}, {'sorting_method': 'shuffle', "
"'read_affinity': '', 'write_affinity': '', "
"'write_affinity_node_count': '2 * replicas', "
"'write_affinity_handoff_delete_count': None, "
"'rebalance_missing_suppression_count': 1, "
"'concurrent_gets': False, 'concurrency_timeout': 0.5, "
"'concurrent_ec_extra_requests': 0"
"}, app)",
repr(default_options))
self.assertEqual(default_options, eval(repr(default_options), {
'ProxyOverrideOptions': default_options.__class__, 'app': app}))
policy_0_options = app.get_policy_options(POLICIES[0])
self.assertEqual(
"ProxyOverrideOptions({}, {'sorting_method': 'affinity', "
"'read_affinity': 'r1=100', 'write_affinity': 'r1', "
"'write_affinity_node_count': '1 * replicas', "
"'write_affinity_handoff_delete_count': 4, "
"'rebalance_missing_suppression_count': 2, "
"'concurrent_gets': False, 'concurrency_timeout': 0.5, "
"'concurrent_ec_extra_requests': 0"
"}, app)",
repr(policy_0_options))
self.assertEqual(policy_0_options, eval(repr(policy_0_options), {
'ProxyOverrideOptions': default_options.__class__, 'app': app}))
self.assertNotEqual(default_options, policy_0_options)
policy_1_options = app.get_policy_options(POLICIES[1])
self.assertIs(default_options, policy_1_options)
def test_per_policy_conf_equality(self):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
"""
app = self._write_conf_and_load_app(conf_sections)
self.assertIs(app.get_policy_options(None),
app.get_policy_options(POLICIES[0]))
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
[proxy-server:policy:0]
concurrent_ec_extra_requests = 1
"""
app = self._write_conf_and_load_app(conf_sections)
self.assertNotEqual(app.get_policy_options(None),
app.get_policy_options(POLICIES[0]))
def test_per_policy_conf_inherits_defaults(self):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = affinity
write_affinity_node_count = 1 * replicas
write_affinity_handoff_delete_count = 3
[proxy-server:policy:0]
read_affinity = r1=100
write_affinity = r1
"""
expected_default = {"read_affinity": "",
"sorting_method": "affinity",
"write_affinity": "",
"write_affinity_node_count_fn": 3,
"write_affinity_handoff_delete_count": 3}
exp_options = {None: expected_default,
POLICIES[0]: {"read_affinity": "r1=100",
"sorting_method": "affinity",
"write_affinity": "r1",
"write_affinity_node_count_fn": 3,
"write_affinity_handoff_delete_count": 3},
POLICIES[1]: expected_default}
exp_is_local = {POLICIES[0]: [({'region': 1, 'zone': 2}, True),
({'region': 2, 'zone': 1}, False)],
POLICIES[1]: None}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, exp_is_local)
def test_per_policy_conf_overrides_default_affinity(self):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = affinity
read_affinity = r2=10
write_affinity_node_count = 1 * replicas
write_affinity = r2
write_affinity_handoff_delete_count = 2
[proxy-server:policy:0]
read_affinity = r1=100
write_affinity = r1
write_affinity_node_count = 5
write_affinity_handoff_delete_count = 3
[proxy-server:policy:1]
read_affinity = r1=1
write_affinity = r3
write_affinity_node_count = 4
write_affinity_handoff_delete_count = 4
"""
exp_options = {None: {"read_affinity": "r2=10",
"sorting_method": "affinity",
"write_affinity": "r2",
"write_affinity_node_count_fn": 3,
"write_affinity_handoff_delete_count": 2},
POLICIES[0]: {"read_affinity": "r1=100",
"sorting_method": "affinity",
"write_affinity": "r1",
"write_affinity_node_count_fn": 5,
"write_affinity_handoff_delete_count": 3},
POLICIES[1]: {"read_affinity": "r1=1",
"sorting_method": "affinity",
"write_affinity": "r3",
"write_affinity_node_count_fn": 4,
"write_affinity_handoff_delete_count": 4}}
exp_is_local = {POLICIES[0]: [({'region': 1, 'zone': 2}, True),
({'region': 2, 'zone': 1}, False)],
POLICIES[1]: [({'region': 3, 'zone': 2}, True),
({'region': 1, 'zone': 1}, False),
({'region': 2, 'zone': 1}, False)]}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, exp_is_local)
def test_per_policy_conf_overrides_default_sorting_method(self):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = timing
[proxy-server:policy:0]
sorting_method = affinity
read_affinity = r1=100
[proxy-server:policy:1]
sorting_method = affinity
read_affinity = r1=1
"""
exp_options = {None: {"read_affinity": "",
"sorting_method": "timing"},
POLICIES[0]: {"read_affinity": "r1=100",
"sorting_method": "affinity"},
POLICIES[1]: {"read_affinity": "r1=1",
"sorting_method": "affinity"}}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, {})
def test_per_policy_conf_with_DEFAULT_options(self):
conf_body = """
[DEFAULT]
write_affinity = r0
read_affinity = r0=100
swift_dir = %s
[pipeline:main]
pipeline = proxy-server
[app:proxy-server]
use = egg:swift#proxy
# in a paste-deploy section, DEFAULT section value overrides
write_affinity = r2
# ...but the use of 'set' overrides the DEFAULT section value
set read_affinity = r1=100
[proxy-server:policy:0]
# not a paste-deploy section so any value here overrides DEFAULT
sorting_method = affinity
write_affinity = r2
read_affinity = r2=100
[proxy-server:policy:1]
sorting_method = affinity
""" % self.tempdir
# Don't just use _write_conf_and_load_app, as we don't want to have
# duplicate DEFAULT sections
conf_path = self._write_conf(conf_body)
with mock.patch('swift.proxy.server.get_logger',
return_value=debug_logger()):
app = loadapp(conf_path, allow_modify_pipeline=False)
exp_options = {
# default read_affinity is r1, set in proxy-server section
None: {"read_affinity": "r1=100",
"sorting_method": "shuffle",
"write_affinity": "r0",
"write_affinity_node_count_fn": 6,
"write_affinity_handoff_delete_count": None},
# policy 0 read affinity is r2, dictated by policy 0 section
POLICIES[0]: {"read_affinity": "r2=100",
"sorting_method": "affinity",
"write_affinity": "r2",
"write_affinity_node_count_fn": 6,
"write_affinity_handoff_delete_count": None},
# policy 1 read_affinity is r0, dictated by DEFAULT section,
# overrides proxy server section
POLICIES[1]: {"read_affinity": "r0=100",
"sorting_method": "affinity",
"write_affinity": "r0",
"write_affinity_node_count_fn": 6,
"write_affinity_handoff_delete_count": None}}
exp_is_local = {
# default write_affinity is r0, dictated by DEFAULT section
None: [({'region': 0, 'zone': 2}, True),
({'region': 1, 'zone': 1}, False)],
# policy 0 write_affinity is r2, dictated by policy 0 section
POLICIES[0]: [({'region': 0, 'zone': 2}, False),
({'region': 2, 'zone': 1}, True)],
# policy 1 write_affinity is r0, inherited from default
POLICIES[1]: [({'region': 0, 'zone': 2}, True),
({'region': 1, 'zone': 1}, False)]}
self._check_policy_options(app, exp_options, exp_is_local)
def test_per_policy_conf_warns_about_sorting_method_mismatch(self):
# verify that policy specific warnings are emitted when read_affinity
# is set but sorting_method is not affinity
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
read_affinity = r2=10
sorting_method = timing
[proxy-server:policy:0]
read_affinity = r1=100
[proxy-server:policy:1]
sorting_method = affinity
read_affinity = r1=1
"""
exp_options = {None: {"read_affinity": "r2=10",
"sorting_method": "timing"},
POLICIES[0]: {"read_affinity": "r1=100",
"sorting_method": "timing"},
POLICIES[1]: {"read_affinity": "r1=1",
"sorting_method": "affinity"}}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, {})
lines = app.logger.get_lines_for_level('warning')
labels = {'default', 'policy 0 (nulo)'}
for line in lines[:2]:
self.assertIn(
"sorting_method is set to 'timing', not 'affinity'", line)
for label in labels:
if label in line:
labels.remove(label)
break
else:
self.fail("None of %s found in warning: %r" % (labels, line))
self.assertFalse(labels)
def test_per_policy_conf_warns_override_sorting_method_mismatch(self):
# verify that policy specific warnings are emitted when read_affinity
# is set but sorting_method is not affinity in a policy config
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = affinity
read_affinity = r2=10
[proxy-server:policy:0]
sorting_method = timing
"""
exp_options = {None: {"read_affinity": "r2=10",
"write_affinity": "",
"sorting_method": "affinity"},
POLICIES[0]: {"read_affinity": "r2=10",
"write_affinity": "",
"sorting_method": "timing"}}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, {})
lines = app.logger.get_lines_for_level('warning')
for line in lines:
# proxy-server gets instantiated twice during loadapp so expect two
# warnings; check that both warnings refer to policy 0 and not the
# default config
self.assertIn(
"sorting_method is set to 'timing', not 'affinity'", line)
self.assertIn('policy 0 (nulo)', line)
self.assertFalse(lines[2:])
def test_per_policy_conf_section_name_inherits_from_app_section_name(self):
conf_sections = """
[app:proxy-srv]
use = egg:swift#proxy
sorting_method = affinity
[proxy-server:policy:0]
sorting_method = timing
# ignored!
[proxy-srv:policy:1]
sorting_method = shuffle
"""
exp_options = {None: {'sorting_method': 'affinity'},
POLICIES[0]: {'sorting_method': 'affinity'},
POLICIES[1]: {'sorting_method': 'shuffle'}}
app = self._write_conf_and_load_app(conf_sections, 'proxy-srv')
self._check_policy_options(app, exp_options, {})
def test_per_policy_conf_with_unknown_policy(self):
# verify that unknown policy section raises an error
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
read_affinity = r2=10
sorting_method = affinity
[proxy-server:policy:999]
read_affinity = r2z1=1
"""
with self.assertRaises(ValueError) as cm:
self._write_conf_and_load_app(conf_sections)
self.assertIn('No policy found for override config, index: 999',
cm.exception.args[0])
def test_per_policy_conf_sets_timing_sorting_method(self):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = affinity
[proxy-server:policy:0]
sorting_method = timing
[proxy-server:policy:1]
read_affinity = r1=1
"""
exp_options = {None: {"read_affinity": "",
"sorting_method": "affinity"},
POLICIES[0]: {"read_affinity": "",
"sorting_method": "timing"},
POLICIES[1]: {"read_affinity": "r1=1",
"sorting_method": "affinity"}}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, {})
def test_per_policy_conf_invalid_sorting_method_value(self):
def do_test(conf_sections, scope):
with self.assertRaises(ValueError) as cm:
self._write_conf_and_load_app(conf_sections)
self.assertEqual(
'Invalid sorting_method value; must be one of shuffle, '
"timing, affinity, not 'broken' for %s" % scope,
cm.exception.args[0])
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = shuffle
[proxy-server:policy:0]
sorting_method = broken
"""
do_test(conf_sections, 'policy 0 (nulo)')
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = broken
[proxy-server:policy:0]
sorting_method = shuffle
"""
do_test(conf_sections, '(default)')
def test_per_policy_conf_invalid_read_affinity_value(self):
def do_test(conf_sections, label):
with self.assertRaises(ValueError) as cm:
self._write_conf_and_load_app(conf_sections)
self.assertIn('broken', cm.exception.args[0])
self.assertIn(
'Invalid read_affinity value:', cm.exception.args[0])
self.assertIn(label, cm.exception.args[0])
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = affinity
read_affinity = r1=1
[proxy-server:policy:0]
sorting_method = affinity
read_affinity = broken
"""
do_test(conf_sections, 'policy 0 (nulo)')
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
sorting_method = affinity
read_affinity = broken
[proxy-server:policy:0]
sorting_method = affinity
read_affinity = r1=1
"""
do_test(conf_sections, '(default)')
def test_per_policy_conf_invalid_write_affinity_value(self):
def do_test(conf_sections, label):
with self.assertRaises(ValueError) as cm:
self._write_conf_and_load_app(conf_sections)
self.assertIn('broken', cm.exception.args[0])
self.assertIn(
'Invalid write_affinity value:', cm.exception.args[0])
self.assertIn(label, cm.exception.args[0])
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
write_affinity = r1
[proxy-server:policy:0]
sorting_method = affinity
write_affinity = broken
"""
do_test(conf_sections, 'policy 0 (nulo)')
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
write_affinity = broken
[proxy-server:policy:0]
write_affinity = r1
"""
do_test(conf_sections, '(default)')
def test_per_policy_conf_invalid_write_affinity_node_count_value(self):
def do_test(conf_sections, label):
with self.assertRaises(ValueError) as cm:
self._write_conf_and_load_app(conf_sections)
self.assertIn('2* replicas', cm.exception.args[0])
self.assertIn('Invalid write_affinity_node_count value:',
cm.exception.args[0])
self.assertIn(label, cm.exception.args[0])
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
write_affinity_node_count = 2 * replicas
[proxy-server:policy:0]
sorting_method = affinity
write_affinity_node_count = 2* replicas
"""
do_test(conf_sections, 'policy 0 (nulo)')
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
write_affinity_node_count = 2* replicas
[proxy-server:policy:0]
write_affinity_node_count = 2 * replicas
"""
do_test(conf_sections, '(default)')
def test_per_policy_conf_bad_section_name(self):
def do_test(policy):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
[proxy-server:policy:%s]
""" % policy
with self.assertRaises(ValueError) as cm:
self._write_conf_and_load_app(conf_sections)
self.assertEqual(
"Override config must refer to policy index: %r" % policy,
cm.exception.args[0])
do_test('')
do_test('uno')
do_test('0.0')
def test_per_policy_conf_overrides_default_concurrency_settings(self):
conf_sections = """
[app:proxy-server]
use = egg:swift#proxy
concurrent_gets = True
concurrency_timeout = 0.5
[proxy-server:policy:0]
concurrent_gets = off
concurrency_timeout = 0.6
[proxy-server:policy:1]
concurrent_gets = True
concurrency_timeout = 0.3
concurrent_ec_extra_requests = 1
"""
exp_options = {
None: {
"concurrent_gets": True,
"concurrency_timeout": 0.5,
"concurrent_ec_extra_requests": 0,
}, POLICIES[0]: {
"concurrent_gets": False,
"concurrency_timeout": 0.6,
"concurrent_ec_extra_requests": 0,
}, POLICIES[1]: {
"concurrent_gets": True,
"concurrency_timeout": 0.3,
"concurrent_ec_extra_requests": 1,
}}
app = self._write_conf_and_load_app(conf_sections)
self._check_policy_options(app, exp_options, {})
def test_log_name(self):
# defaults...
conf_sections = """
[DEFAULT]
log_statsd_host = example.com
swift_dir = %s
[pipeline:main]
pipeline = proxy-server
[app:proxy-server]
use = egg:swift#proxy
""" % self.tempdir
conf_path = self._write_conf(dedent(conf_sections))
with mock.patch('swift.common.utils.StatsdClient') as mock_statsd:
app = loadapp(conf_path, allow_modify_pipeline=False)
# logger name is hard-wired 'proxy-server'
self.assertEqual('proxy-server', app.logger.name)
self.assertEqual('swift', app.logger.server)
mock_statsd.assert_called_once_with(
'example.com', 8125, '', 'proxy-server', 1.0, 1.0,
logger=app.logger.logger)
conf_sections = """
[DEFAULT]
log_name = test-name
log_statsd_host = example.com
swift_dir = %s
[pipeline:main]
pipeline = proxy-server
[app:proxy-server]
use = egg:swift#proxy
""" % self.tempdir
conf_path = self._write_conf(dedent(conf_sections))
with mock.patch('swift.common.utils.StatsdClient') as mock_statsd:
app = loadapp(conf_path, allow_modify_pipeline=False)
# logger name is hard-wired 'proxy-server'
self.assertEqual('proxy-server', app.logger.name)
# server is defined by log_name option
self.assertEqual('test-name', app.logger.server)
# statsd tail prefix is hard-wired 'proxy-server'
mock_statsd.assert_called_once_with(
'example.com', 8125, '', 'proxy-server', 1.0, 1.0,
logger=app.logger.logger)
class TestProxyServerConfigStringLoading(TestProxyServerConfigLoading):
# The proxy may be loaded from a conf string rather than a conf file, for
# example when ContainerSync creates an InternalClient from a default
# config string. So repeat super-class tests using a string loader.
def _write_conf(self, conf_body):
# this is broken out to a method so that subclasses can override
return ConfigString(conf_body)
class BaseTestObjectController(object):
"""
A root of TestObjController that implements helper methods for child
TestObjControllers.
"""
def setUp(self):
# clear proxy logger results for each test
_test_servers[0].logger._clear()
def assert_status_map(self, method, statuses, expected, raise_exc=False):
with save_globals():
kwargs = {}
if raise_exc:
kwargs['raise_exc'] = raise_exc
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a/c/o',
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
try:
res = method(req)
except HTTPException as res: # noqa: F841
pass
self.assertEqual(res.status_int, expected)
# repeat test
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a/c/o',
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
try:
res = method(req)
except HTTPException as res: # noqa: F841
pass
self.assertEqual(res.status_int, expected)
def _sleep_enough(self, condition):
for sleeptime in (0.1, 1.0):
sleep(sleeptime)
if condition():
break
def put_container(self, policy_name, container_name, prolis=None):
# Note: only works if called with unpatched policies
prolis = prolis or _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: %s\r\n'
'\r\n' % (container_name, policy_name)).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 2'
self.assertEqual(headers[:len(exp)], exp)
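# Many tests below speak raw HTTP/1.1 over a TCP socket (connect_tcp +
# makefile('rwb') + readuntil2crlfs) instead of using Request.blank, so that
# wire-level details such as 100-continue handling and connection teardown
# are exercised.  A minimal stdlib-only sketch of the same pattern (the
# helper name and arguments are illustrative, not part of the test fixture):
def _example_raw_http_status(host, port, request_bytes):
    import socket
    with socket.create_connection((host, port)) as sock:
        sock.sendall(request_bytes)
        fd = sock.makefile('rb')
        headers = b''
        # read until the blank line that terminates the response headers
        while not headers.endswith(b'\r\n\r\n'):
            byte = fd.read(1)
            if not byte:
                break
            headers += byte
        return headers.split(b'\r\n', 1)[0]  # e.g. b'HTTP/1.1 201 Created'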
def _test_conditional_GET(self, policy):
container_name = uuid.uuid4().hex
object_path = '/v1/a/%s/conditionals' % container_name
self.put_container(policy.name, container_name)
obj = b'this object has an etag and is otherwise unimportant'
etag = md5(obj, usedforsecurity=False).hexdigest()
not_etag = md5(obj + b"blahblah", usedforsecurity=False).hexdigest()
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (object_path, len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
for verb, body in (('GET', obj), ('HEAD', b'')):
# If-Match
req = Request.blank(
object_path,
environ={'REQUEST_METHOD': verb},
headers={'If-Match': etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
self.assertEqual(etag, resp.headers.get('etag'))
self.assertEqual('bytes', resp.headers.get('accept-ranges'))
req = Request.blank(
object_path,
environ={'REQUEST_METHOD': verb},
headers={'If-Match': not_etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 412)
self.assertEqual(etag, resp.headers.get('etag'))
req = Request.blank(
object_path,
environ={'REQUEST_METHOD': verb},
headers={'If-Match': "*"})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
self.assertEqual(etag, resp.headers.get('etag'))
self.assertEqual('bytes', resp.headers.get('accept-ranges'))
# If-None-Match
req = Request.blank(
object_path,
environ={'REQUEST_METHOD': verb},
headers={'If-None-Match': etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 304)
self.assertEqual(etag, resp.headers.get('etag'))
self.assertEqual('bytes', resp.headers.get('accept-ranges'))
req = Request.blank(
object_path,
environ={'REQUEST_METHOD': verb},
headers={'If-None-Match': not_etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
self.assertEqual(etag, resp.headers.get('etag'))
self.assertEqual('bytes', resp.headers.get('accept-ranges'))
req = Request.blank(
object_path,
environ={'REQUEST_METHOD': verb},
headers={'If-None-Match': "*"})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 304)
self.assertEqual(etag, resp.headers.get('etag'))
self.assertEqual('bytes', resp.headers.get('accept-ranges'))
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
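# Summary of the conditional GET/HEAD matrix exercised above: If-Match with
# the correct etag or "*" -> 200; If-Match with a wrong etag -> 412;
# If-None-Match with the correct etag or "*" -> 304; If-None-Match with a
# wrong etag -> 200.  Every response carries the object's etag and
# "Accept-Ranges: bytes".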
@unpatch_policies
def test_GET_pipeline(self):
conf = _test_context['conf']
conf['client_timeout'] = 0.1
prosrv = proxy_server.Application(conf, logger=debug_logger('proxy'))
with in_process_proxy(
prosrv, socket_timeout=conf['client_timeout']) as prolis:
self.put_container(self.policy.name, self.policy.name,
prolis=prolis)
obj = b'0123456' * 11 * 17
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s/go-get-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'X-Object-Meta-Color: chartreuse\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (
self.policy.name,
len(obj),
)).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
fd.write(('GET /v1/a/%s/go-get-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.policy.name).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
for line in headers.splitlines():
if b'Content-Length' in line:
h, v = line.split()
content_length = int(v.strip())
break
else:
self.fail("Didn't find content-length in %r" % (headers,))
gotten_obj = fd.read(content_length)
self.assertEqual(gotten_obj, obj)
sleep(0.3) # client_timeout should kick us off
fd.write(('GET /v1/a/%s/go-get-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.policy.name).encode('ascii'))
fd.flush()
# makefile is a little weird, but this is disconnected
self.assertEqual(b'', fd.read())
# I expected this to raise a socket error
self.assertEqual(b'', sock.recv(1024))
# ... but we ARE disconnected
with self.assertRaises(socket.error) as caught:
sock.send(b'test')
self.assertEqual(caught.exception.errno, errno.EPIPE)
# and logging confirms we've timed out
last_debug_msg = prosrv.logger.get_lines_for_level('debug')[-1]
self.assertIn('timed out', last_debug_msg)
@patch_policies([StoragePolicy(0, 'zero', True,
object_ring=FakeRing(base_port=3000))])
class TestReplicatedObjectController(
BaseTestObjectController, unittest.TestCase):
"""
Test suite for replication policy
"""
def setUp(self):
skip_if_no_xattrs()
_test_servers[0].error_limiter.stats.clear() # clear out errors
self.logger = debug_logger('proxy-ut')
self.app = proxy_server.Application(
None,
logger=self.logger,
account_ring=FakeRing(),
container_ring=FakeRing())
self.policy = POLICIES[0]
super(TestReplicatedObjectController, self).setUp()
def tearDown(self):
self.app.account_ring.set_replicas(3)
self.app.container_ring.set_replicas(3)
for policy in POLICIES:
policy.object_ring = FakeRing(base_port=3000)
@unpatch_policies
def test_policy_IO(self):
def check_file(policy, cont, devs, check_val):
partition, nodes = policy.object_ring.get_nodes('a', cont, 'o')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileManager(conf, debug_logger())
for dev in devs:
file = df_mgr.get_diskfile(dev, partition, 'a',
cont, 'o',
policy=policy)
if check_val is True:
file.open()
prolis = _test_sockets[0]
prosrv = _test_servers[0]
# check policy 0: put file on c, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
obj = b'test_object0'
path = '/v1/a/c/o'
fd.write(('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: text/plain\r\n'
'\r\n' % (path, str(len(obj)))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
check_file(POLICIES[0], 'c', ['sda1', 'sdb1'], True)
check_file(POLICIES[0], 'c', ['sdc1', 'sdd1', 'sde1', 'sdf1'], False)
# check policy 1: put file on c1, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
path = '/v1/a/c1/o'
obj = b'test_object1'
fd.write(('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: text/plain\r\n'
'\r\n' % (path, str(len(obj)))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
check_file(POLICIES[1], 'c1', ['sdc1', 'sdd1'], True)
check_file(POLICIES[1], 'c1', ['sda1', 'sdb1', 'sde1', 'sdf1'], False)
# check policy 2: put file on c2, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
path = '/v1/a/c2/o'
obj = b'test_object2'
fd.write(('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: text/plain\r\n'
'\r\n' % (path, str(len(obj)))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
check_file(POLICIES[2], 'c2', ['sde1', 'sdf1'], True)
check_file(POLICIES[2], 'c2', ['sda1', 'sdb1', 'sdc1', 'sdd1'], False)
@unpatch_policies
def test_policy_IO_override(self):
if hasattr(_test_servers[-1], '_filesystem'):
# ironically, the _filesystem attribute on the object server means
# the in-memory diskfile is in use, so this test does not apply
return
prosrv = _test_servers[0]
# validate container policy is 1
req = Request.blank('/v1/a/c1', method='HEAD')
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 204) # sanity check
self.assertEqual(POLICIES[1].name, res.headers['x-storage-policy'])
# check overrides: put it in policy 2 (not where the container says)
req = Request.blank(
'/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': BytesIO(b"hello")},
headers={'Content-Type': 'text/plain',
'Content-Length': '5',
'X-Backend-Storage-Policy-Index': '2'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 201) # sanity check
# go to disk to make sure it's there
partition, nodes = prosrv.get_object_ring(2).get_nodes(
'a', 'c1', 'wrong-o')
node = nodes[0]
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileManager(conf, debug_logger())
df = df_mgr.get_diskfile(node['device'], partition, 'a',
'c1', 'wrong-o', policy=POLICIES[2])
with df.open():
contents = b''.join(df.reader())
self.assertEqual(contents, b"hello")
# can't get it from the normal place
req = Request.blank('/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 404) # sanity check
# but we can get it from policy 2
req = Request.blank('/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'text/plain',
'X-Backend-Storage-Policy-Index': '2'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, b'hello')
# and we can delete it the same way
req = Request.blank('/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Content-Type': 'text/plain',
'X-Backend-Storage-Policy-Index': '2'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 204)
df = df_mgr.get_diskfile(node['device'], partition, 'a',
'c1', 'wrong-o', policy=POLICIES[2])
try:
df.open()
except DiskFileNotExist as e:
self.assertGreater(float(e.timestamp), 0)
else:
self.fail('did not raise DiskFileNotExist')
@unpatch_policies
def test_GET_newest_large_file(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
obj = b'a' * (1024 * 1024)
path = '/v1/a/c/o.large'
fd.write(('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (path, str(len(obj)))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'application/octet-stream',
'X-Newest': 'true'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
@unpatch_policies
def test_GET_ranges(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
obj = ''.join(
('beans lots of beans lots of beans lots of beans yeah %04d ' % i)
for i in range(100)).encode('ascii')
path = '/v1/a/c/o.beans'
fd.write(('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (path, str(len(obj)))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# one byte range
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=10-200'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 206)
self.assertEqual(res.body, obj[10:201])
req = Request.blank(path, environ={'REQUEST_METHOD': 'GET'}, headers={
'Content-Type': 'application/octet-stream',
'X-Backend-Ignore-Range-If-Metadata-Present': 'Content-Type',
'Range': 'bytes=10-200'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
# multiple byte ranges
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=10-200,1000-1099,4123-4523'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 206)
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges')
boundary = dict(params).get('boundary')
self.assertIsNotNone(boundary)
if not isinstance(boundary, bytes):
boundary = boundary.encode('ascii')
got_mime_docs = []
for mime_doc_fh in iter_multipart_mime_documents(BytesIO(res.body),
boundary):
headers = parse_mime_headers(mime_doc_fh)
body = mime_doc_fh.read()
got_mime_docs.append((headers, body))
self.assertEqual(len(got_mime_docs), 3)
first_range_headers = got_mime_docs[0][0]
first_range_body = got_mime_docs[0][1]
self.assertEqual(first_range_headers['Content-Range'],
'bytes 10-200/5800')
self.assertEqual(first_range_body, obj[10:201])
second_range_headers = got_mime_docs[1][0]
second_range_body = got_mime_docs[1][1]
self.assertEqual(second_range_headers['Content-Range'],
'bytes 1000-1099/5800')
self.assertEqual(second_range_body, obj[1000:1100])
third_range_headers = got_mime_docs[2][0]
third_range_body = got_mime_docs[2][1]
self.assertEqual(third_range_headers['Content-Range'],
                 'bytes 4123-4523/5800')
self.assertEqual(third_range_body, obj[4123:4524])
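# Note on the Content-Range arithmetic asserted above: "bytes 10-200/5800"
# names an inclusive range, so it corresponds to the Python slice obj[10:201]
# (191 bytes).  A small illustrative helper (not something the proxy
# exposes):
def _example_content_range_to_slice(content_range):
    # 'bytes 10-200/5800' -> slice(10, 201)
    byte_range = content_range.split(' ', 1)[1].split('/', 1)[0]
    start, end = (int(x) for x in byte_range.split('-'))
    return slice(start, end + 1)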
@unpatch_policies
def test_GET_bad_range_zero_byte(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
path = '/v1/a/c/o.zerobyte'
fd.write(('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: 0\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (path,)).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# bad byte-range
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=spaghetti-carbonara'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, b'')
# not a byte-range
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'Kotta'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, b'')
@unpatch_policies
def test_PUT_GET_unicode_metadata(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
path = b'/v1/a/c/o.zerobyte'
fd.write(b'PUT %s HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'X-Storage-Token: t\r\n'
b'Expect: 100-continue\r\n'
b'Transfer-Encoding: chunked\r\n'
b'Content-Type: application/octet-stream\r\n'
b'X-Object-Meta-\xf0\x9f\x8c\xb4: \xf0\x9f\x91\x8d\r\n'
b'\r\n0\r\n\r\n' % (path,))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 100'
self.assertEqual(headers[:len(exp)], exp)
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
fd.write(b'GET %s HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: close\r\n'
b'X-Storage-Token: t\r\n'
b'\r\n' % (path,))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn(b'X-Object-Meta-\xf0\x9f\x8c\xb4: \xf0\x9f\x91\x8d',
headers.split(b'\r\n'))
@unpatch_policies
def test_HEAD_absolute_uri(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
# sanity, this resource is created in setup
path = b'/v1/a'
fd.write(b'HEAD %s HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: keep-alive\r\n'
b'X-Storage-Token: t\r\n'
b'\r\n' % (path,))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 204'
self.assertEqual(headers[:len(exp)], exp)
# RFC says we should accept this, too
abs_path = b'http://saio.example.com:8080/v1/a'
fd.write(b'HEAD %s HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: keep-alive\r\n'
b'X-Storage-Token: t\r\n'
b'\r\n' % (abs_path,))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 204'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_GET_short_read(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
obj = (b''.join(
(b'%d bottles of beer on the wall\n' % i)
for i in reversed(range(1, 200))))
# if the object is too short, then we don't have a mid-stream
# exception after the headers are sent, but instead an early one
# before the headers
self.assertGreater(len(obj), wsgi.MINIMUM_CHUNK_SIZE)
path = '/v1/a/c/o.bottles'
fd.write(('PUT %s HTTP/1.1\r\n'
'Connection: keep-alive\r\n'
'Host: localhost\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/beer-stream\r\n'
'\r\n' % (path, str(len(obj)))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# go shorten that object by a few bytes
shrinkage = 100 # bytes
shortened = 0
for dirpath, _, filenames in os.walk(_testdir):
for filename in filenames:
if filename.endswith(".data"):
with open(os.path.join(dirpath, filename), "r+") as fh:
fh.truncate(len(obj) - shrinkage)
shortened += 1
self.assertGreater(shortened, 0) # ensure test is working
real_fstat = os.fstat
# stop the object server from immediately quarantining the object
# and returning 404
def lying_fstat(fd):
sr = real_fstat(fd)
fake_stat_result = posix.stat_result((
sr.st_mode, sr.st_ino, sr.st_dev, sr.st_nlink, sr.st_uid,
sr.st_gid,
sr.st_size + shrinkage, # here's the lie
sr.st_atime, sr.st_mtime, sr.st_ctime))
return fake_stat_result
# Read the object back
with mock.patch('os.fstat', lying_fstat), \
mock.patch.object(prosrv, 'client_chunk_size', 32), \
mock.patch.object(prosrv, 'object_chunk_size', 32):
fd.write(('GET %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: keep-alive\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % (path,)).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
obj_parts = []
while True:
buf = fd.read(1024)
if not buf:
break
obj_parts.append(buf)
got_obj = b''.join(obj_parts)
self.assertLessEqual(len(got_obj), len(obj) - shrinkage)
# Make sure the server closed the connection
with self.assertRaises(socket.error):
# Two calls are necessary; you can apparently write to a socket
# that the peer has closed exactly once without error, then the
# kernel discovers that the connection is not open and
# subsequent send attempts fail.
sock.sendall(b'GET /info HTTP/1.1\r\n')
sock.sendall(b'Host: localhost\r\n'
b'X-Storage-Token: t\r\n'
b'\r\n')
@unpatch_policies
def test_GET_short_read_resuming(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
obj = (b''.join(
(b'%d bottles of beer on the wall\n' % i)
for i in reversed(range(1, 200))))
# if the object is too short, then we don't have a mid-stream
# exception after the headers are sent, but instead an early one
# before the headers
self.assertGreater(len(obj), wsgi.MINIMUM_CHUNK_SIZE)
path = '/v1/a/c/o.bottles'
fd.write(('PUT %s HTTP/1.1\r\n'
'Connection: keep-alive\r\n'
'Host: localhost\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/beer-stream\r\n'
'\r\n' % (path, str(len(obj)))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# we shorten the first replica of the object by 200 bytes and leave
# the others untouched
_, obj_nodes = POLICIES.default.object_ring.get_nodes(
"a", "c", "o.bottles")
shortened = 0
for dirpath, _, filenames in os.walk(
os.path.join(_testdir, obj_nodes[0]['device'])):
for filename in filenames:
if filename.endswith(".data"):
if shortened == 0:
with open(os.path.join(dirpath, filename), "r+") as fh:
fh.truncate(len(obj) - 200)
shortened += 1
self.assertEqual(shortened, 1) # sanity check
real_fstat = os.fstat
# stop the object server from immediately quarantining the object
# and returning 404
def lying_fstat(fd):
sr = real_fstat(fd)
fake_stat_result = posix.stat_result((
sr.st_mode, sr.st_ino, sr.st_dev, sr.st_nlink, sr.st_uid,
sr.st_gid,
len(obj), # sometimes correct, sometimes not
sr.st_atime, sr.st_mtime, sr.st_ctime))
return fake_stat_result
# Read the object back
with mock.patch('os.fstat', lying_fstat), \
mock.patch.object(prosrv, 'client_chunk_size', 32), \
mock.patch.object(prosrv, 'object_chunk_size', 32), \
mock.patch.object(prosrv, 'sort_nodes',
lambda nodes, **kw: nodes):
fd.write(('GET %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % (path,)).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
obj_parts = []
while True:
buf = fd.read(1024)
if not buf:
break
obj_parts.append(buf)
got_obj = b''.join(obj_parts)
# technically this is a redundant test, but it saves us from screens
# full of error messages when got_obj is shorter than obj
self.assertEqual(len(obj), len(got_obj))
self.assertEqual(obj, got_obj)
@unpatch_policies
def test_GET_ranges_resuming(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
obj = (b''.join(
(b'Smurf! The smurfing smurf is completely smurfed. %03d ' % i)
for i in range(1000)))
path = '/v1/a/c/o.smurfs'
fd.write(('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/smurftet-stream\r\n'
'\r\n' % (path, str(len(obj)))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
kaboomed = [0]
bytes_before_timeout = [None]
class FileLikeKaboom(object):
def __init__(self, inner_file_like):
self.inner_file_like = inner_file_like
# delegate close(), etc. to the inner file-like
def __getattr__(self, attr):
return getattr(self.inner_file_like, attr)
def readline(self, *a, **kw):
if bytes_before_timeout[0] <= 0:
kaboomed[0] += 1
raise ChunkReadTimeout(None)
result = self.inner_file_like.readline(*a, **kw)
if len(result) > bytes_before_timeout[0]:
result = result[:bytes_before_timeout[0]]
bytes_before_timeout[0] -= len(result)
return result
def read(self, length=None):
result = self.inner_file_like.read(length)
if bytes_before_timeout[0] <= 0:
kaboomed[0] += 1
raise ChunkReadTimeout(None)
if len(result) > bytes_before_timeout[0]:
result = result[:bytes_before_timeout[0]]
bytes_before_timeout[0] -= len(result)
return result
orig_hrtdi = swift.common.request_helpers. \
http_response_to_document_iters
# Use this to mock out http_response_to_document_iters. On the first
# call, the result will be sabotaged to blow up with
# ChunkReadTimeout after some number of bytes are read. On
# subsequent calls, no sabotage will be added.
def sabotaged_hrtdi(*a, **kw):
resp_parts = orig_hrtdi(*a, **kw)
for sb, eb, l, h, range_file in resp_parts:
if bytes_before_timeout[0] <= 0:
# simulate being unable to read MIME part of
# multipart/byteranges response
kaboomed[0] += 1
raise ChunkReadTimeout(None)
boomer = FileLikeKaboom(range_file)
yield sb, eb, l, h, boomer
sabotaged = [False]
def single_sabotage_hrtdi(*a, **kw):
if not sabotaged[0]:
sabotaged[0] = True
return sabotaged_hrtdi(*a, **kw)
else:
return orig_hrtdi(*a, **kw)
# We want sort of an end-to-end test of object resuming, so what we
# do is mock out stuff so the proxy thinks it only read a certain
# number of bytes before it got a timeout.
bytes_before_timeout[0] = 300
with mock.patch.object(proxy_base,
'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=0-500'})
res = req.get_response(prosrv)
body = res.body # read the whole thing
self.assertEqual(kaboomed[0], 1) # sanity check
self.assertEqual(res.status_int, 206)
self.assertEqual(len(body), 501)
self.assertEqual(body, obj[:501])
# Sanity-check for multi-range resume: make sure we actually break
# in the middle of the second byterange. This test is partially
# about what happens when all the object servers break at once, and
# partially about validating all these mocks we do. After all, the
# point of resuming is that the client can't tell anything went
# wrong, so we need a test where we can't resume and something
# *does* go wrong so we can observe it.
bytes_before_timeout[0] = 700
kaboomed[0] = 0
sabotaged[0] = False
prosrv.error_limiter.stats.clear() # clear out errors
with mock.patch.object(proxy_base,
'http_response_to_document_iters',
sabotaged_hrtdi): # perma-broken
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = b''
try:
for chunk in res.app_iter:
body += chunk
except (ChunkReadTimeout, ChunkReadError):
pass
self.assertEqual(res.status_int, 206)
self.assertGreater(kaboomed[0], 0) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertIsNotNone(boundary) # sanity check
if not isinstance(boundary, bytes):
boundary = boundary.encode('ascii')
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(BytesIO(body),
boundary):
parse_mime_headers(mime_doc_fh)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(len(got_byteranges[1]), 199) # partial
# Multi-range resume, resuming in the middle of the first byterange
bytes_before_timeout[0] = 300
kaboomed[0] = 0
sabotaged[0] = False
prosrv.error_limiter.stats.clear() # clear out errors
with mock.patch.object(proxy_base,
'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = b''.join(res.app_iter)
self.assertEqual(res.status_int, 206)
self.assertEqual(kaboomed[0], 1) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertIsNotNone(boundary) # sanity check
if not isinstance(boundary, bytes):
boundary = boundary.encode('ascii')
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(BytesIO(body),
boundary):
parse_mime_headers(mime_doc_fh)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 3)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(got_byteranges[0], obj[:501])
self.assertEqual(len(got_byteranges[1]), 501)
self.assertEqual(got_byteranges[1], obj[1000:1501])
self.assertEqual(len(got_byteranges[2]), 501)
self.assertEqual(got_byteranges[2], obj[2000:2501])
# Multi-range resume, first GET dies in the middle of the second set
# of MIME headers
bytes_before_timeout[0] = 501
kaboomed[0] = 0
sabotaged[0] = False
prosrv.error_limiter.stats.clear() # clear out errors
with mock.patch.object(proxy_base,
'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = b''.join(res.app_iter)
self.assertEqual(res.status_int, 206)
self.assertGreaterEqual(kaboomed[0], 1) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertIsNotNone(boundary) # sanity check
if not isinstance(boundary, bytes):
boundary = boundary.encode('ascii')
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(BytesIO(body),
boundary):
parse_mime_headers(mime_doc_fh)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 3)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(got_byteranges[0], obj[:501])
self.assertEqual(len(got_byteranges[1]), 501)
self.assertEqual(got_byteranges[1], obj[1000:1501])
self.assertEqual(len(got_byteranges[2]), 501)
self.assertEqual(got_byteranges[2], obj[2000:2501])
# Multi-range resume, first GET dies in the middle of the second
# byterange
bytes_before_timeout[0] = 750
kaboomed[0] = 0
sabotaged[0] = False
prosrv.error_limiter.stats.clear() # clear out errors
with mock.patch.object(proxy_base,
'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = b''.join(res.app_iter)
self.assertEqual(res.status_int, 206)
self.assertGreaterEqual(kaboomed[0], 1) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertIsNotNone(boundary) # sanity check
if not isinstance(boundary, bytes):
boundary = boundary.encode('ascii')
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(BytesIO(body),
boundary):
parse_mime_headers(mime_doc_fh)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 3)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(got_byteranges[0], obj[:501])
self.assertEqual(len(got_byteranges[1]), 501)
self.assertEqual(got_byteranges[1], obj[1000:1501])
self.assertEqual(len(got_byteranges[2]), 501)
self.assertEqual(got_byteranges[2], obj[2000:2501])
@unpatch_policies
def test_conditional_GET_replication(self):
policy = POLICIES[0]
self.assertEqual('replication', policy.policy_type) # sanity
self._test_conditional_GET(policy)
def test_PUT_expect_header_zero_content_length(self):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
if headers.get('Transfer-Encoding') != 'chunked':
test_errors.append('"Transfer-Encoding: chunked" should '
'be in headers for object server!')
if 'Expect' not in headers:
test_errors.append('Expect should be in headers for '
'object server!')
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
# The (201, Exception('test')) tuples in there have the effect of
# changing the status of the initial expect response. The default
# expect response from FakeConn for 201 is 100.
# But the object server won't send a 100 continue line if the
# client doesn't send an Expect: 100-continue header (as is the case with
# zero byte PUTs as validated by this test), nevertheless the
# object controller calls getexpect without prejudice. In this
# case the status from the response shows up early in getexpect
# instead of having to wait until getresponse. The Exception is
# in there to ensure that the object controller also *uses* the
# result of getexpect instead of calling getresponse in which case
# our FakeConn will blow up.
success_codes = [(201, Exception('test'))] * 3
set_http_connect(200, 200, *success_codes,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(test_errors, [])
self.assertTrue(res.status.startswith('201 '), res.status)
def test_PUT_expect_header_nonzero_content_length(self):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
if 'Expect' not in headers:
test_errors.append('Expect was not in headers for '
'non-zero byte PUT!')
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
# the (100, 201) tuples in there are just being extra explicit
# about the FakeConn returning the 100 Continue status when the
# object controller calls getexpect, which is FakeConn's default
# for 201 if no expect_status is specified.
success_codes = [(100, 201)] * 3
set_http_connect(200, 200, *success_codes,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 1
req.body = 'a'
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(test_errors, [])
self.assertTrue(res.status.startswith('201 '))
def _check_PUT_respects_write_affinity(self, conf, policy,
expected_region):
written_to = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
written_to.append((ipaddr, port, device))
# mock shuffle to be a no-op to ensure that the only way nodes would
# not be used in ring order is if affinity is respected.
with mock.patch('swift.proxy.server.shuffle', lambda x: x):
app = proxy_server.Application(
conf,
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(),
container_ring=FakeRing())
with save_globals():
object_ring = app.get_object_ring(policy)
object_ring.max_more_nodes = 100
controller = \
ReplicatedObjectController(
app, 'a', 'c', 'o.jpg')
# requests go to acc, con, obj, obj, obj
set_http_connect(200, 200, 201, 201, 201,
give_connect=test_connect)
req = Request.blank(
'/v1/a/c/o.jpg', method='PUT', body='a',
headers={'X-Backend-Storage-Policy-Index': str(policy)})
res = controller.PUT(req)
self.assertTrue(res.status.startswith('201 '))
self.assertEqual(3, len(written_to))
for ip, port, device in written_to:
# this is kind of a hokey test, but in FakeRing, the port is even
# when the region is 0, and odd when the region is 1, so this test
# asserts that we only wrote to nodes in the expected region.
self.assertEqual(expected_region, port % 2)
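# The region check above relies on a FakeRing convention (restated in the
# comment): even ports mean region 0, odd ports mean region 1.  A hedged,
# illustrative sketch of what a region-only write_affinity spec such as 'r0'
# boils down to (Swift builds richer predicates from the affinity string;
# this handles only the region-only form used in these tests):
def _example_affinity_predicate(spec):
    # 'r0' -> predicate selecting nodes whose ring region is 0
    region = int(spec.lstrip('r'))
    return lambda node: node.get('region') == region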
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing()),
StoragePolicy(1, 'one', False, object_ring=FakeRing())])
def test_PUT_respects_write_affinity(self):
# nodes in fake ring order have r0z0, r1z1, r0z2
# Check default conf via proxy server conf
conf = {'write_affinity': 'r0'}
self._check_PUT_respects_write_affinity(conf, 0, 0)
# policy 0 and policy 1 have conf via per policy conf section
conf = {
'write_affinity': '',
'policy_config': {
'0': {'write_affinity': 'r0'},
'1': {'write_affinity': 'r1'}
}
}
self._check_PUT_respects_write_affinity(conf, 0, 0)
self._check_PUT_respects_write_affinity(conf, 1, 1)
# policy 0 conf via per policy conf section overrides proxy server conf,
# policy 1 uses default
conf = {
'write_affinity': 'r0',
'policy_config': {
'0': {'write_affinity': 'r1'}
}
}
self._check_PUT_respects_write_affinity(conf, 0, 1)
self._check_PUT_respects_write_affinity(conf, 1, 0)
def test_PUT_respects_write_affinity_with_507s(self):
written_to = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
written_to.append((ipaddr, port, device))
with save_globals():
def is_r0(node):
return node['region'] == 0
object_ring = self.app.get_object_ring(0)
object_ring.max_more_nodes = 100
policy_options = self.app.get_policy_options(POLICIES[0])
policy_options.write_affinity_is_local_fn = is_r0
policy_options.write_affinity_node_count_fn = lambda r: 3
controller = \
ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
error_node = object_ring.get_part_nodes(1)[0]
self.app.error_limit(error_node, 'test')
self.assertEqual(
1, self.logger.statsd_client.get_increment_counts().get(
'error_limiter.forced_limit', 0))
line = self.logger.get_lines_for_level('error')[-1]
self.assertEqual(
('Node will be error limited for 60.00s: %s, error: %s'
% (node_to_string(error_node), 'test')), line)
# no error-limit checking has happened yet.
self.assertEqual(
0, self.logger.statsd_client.get_increment_counts().get(
'error_limiter.is_limited', 0))
set_http_connect(200, 200, # account, container
201, 201, 201, # 3 working backends
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 1
req.body = 'a'
res = controller.PUT(req)
self.assertTrue(res.status.startswith('201 '))
# error limiting happened during the PUT.
self.assertEqual(
1, self.logger.statsd_client.get_increment_counts().get(
'error_limiter.is_limited', 0))
# this is kind of a hokey test, but in FakeRing, the port is even when
# the region is 0, and odd when the region is 1, so this test asserts
# that we wrote to 2 nodes in region 0, then went to 1 non-r0 node.
def get_region(x):
return x[1] % 2 # it's (ip, port, device)
self.assertEqual([0, 0, 1], [get_region(x) for x in written_to])
@unpatch_policies
def test_PUT_no_etag_fallocate(self):
with mock.patch('swift.obj.diskfile.fallocate') as mock_fallocate:
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
obj = b'hemoleucocytic-surfactant'
fd.write(('PUT /v1/a/c/o HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# one for each obj server; this test has 2
self.assertEqual(len(mock_fallocate.mock_calls), 2)
@unpatch_policies
def test_PUT_message_length_using_content_length(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
obj = b'j' * 20
fd.write(('PUT /v1/a/c/o.content-length HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (str(len(obj)))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_using_transfer_encoding(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: close\r\n'
b'X-Storage-Token: t\r\n'
b'Content-Type: application/octet-stream\r\n'
b'Transfer-Encoding: chunked\r\n\r\n'
b'2\r\n'
b'oh\r\n'
b'4\r\n'
b' say\r\n'
b'4\r\n'
b' can\r\n'
b'4\r\n'
b' you\r\n'
b'4\r\n'
b' see\r\n'
b'3\r\n'
b' by\r\n'
b'4\r\n'
b' the\r\n'
b'8\r\n'
b' dawns\'\n\r\n'
b'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
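# The hand-written chunked body above frames each piece as
# "<hex length>\r\n<bytes>\r\n" and ends with "0\r\n\r\n", decoding to
# b"oh say can you see by the dawns'\n" (33 bytes).  A minimal sketch of
# building such a body programmatically (helper name is illustrative):
def _example_chunked_body(pieces):
    body = b''
    for piece in pieces:
        body += b'%x\r\n%s\r\n' % (len(piece), piece)
    return body + b'0\r\n\r\n'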
@unpatch_policies
def test_PUT_message_length_using_both(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
with mock.patch('swift.obj.diskfile.fallocate') as mock_fallocate:
fd.write(b'PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: keep-alive\r\n'
b'X-Storage-Token: t\r\n'
b'Content-Type: application/octet-stream\r\n'
b'Content-Length: 33\r\n'
b'Transfer-Encoding: chunked\r\n\r\n'
b'2\r\n'
b'oh\r\n'
b'4\r\n'
b' say\r\n'
b'4\r\n'
b' can\r\n'
b'4\r\n'
b' you\r\n'
b'4\r\n'
b' see\r\n'
b'3\r\n'
b' by\r\n'
b'4\r\n'
b' the\r\n'
b'8\r\n'
b' dawns\'\n\r\n'
b'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
self.assertFalse(mock_fallocate.mock_calls)
fd.write(b'GET /v1/a/c/o.chunked HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: close\r\n'
b'X-Storage-Token: t\r\n'
b'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn(b'Content-Length: 33', headers.split(b'\r\n'))
self.assertEqual(b"oh say can you see by the dawns'\n", fd.read(33))
@unpatch_policies
def test_PUT_message_length_using_both_with_crazy_meta(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'X-Storage-Token: t\r\n'
b'Content-Type: message/rfc822\r\n'
b'Content-Length: 33\r\n'
b'X-Object-Meta-\xf0\x9f\x8c\xb4: \xf0\x9f\x91\x8d\r\n'
b'X-Object-Meta-\xe2\x98\x85: \xe2\x98\x85\r\n'
b'Expect: 100-continue\r\n'
b'Transfer-Encoding: chunked\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 100 Continue'
self.assertEqual(headers[:len(exp)], exp)
# Since we got our 100 Continue, now we can send the body
fd.write(b'2\r\n'
b'oh\r\n'
b'4\r\n'
b' say\r\n'
b'4\r\n'
b' can\r\n'
b'4\r\n'
b' you\r\n'
b'4\r\n'
b' see\r\n'
b'3\r\n'
b' by\r\n'
b'4\r\n'
b' the\r\n'
b'8\r\n'
b' dawns\'\n\r\n'
b'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
fd.write(b'GET /v1/a/c/o.chunked HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: close\r\n'
b'X-Storage-Token: t\r\n'
b'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
header_lines = headers.split(b'\r\n')
self.assertIn(b'Content-Length: 33', header_lines)
self.assertIn(b'Content-Type: message/rfc822', header_lines)
self.assertIn(b'X-Object-Meta-\xf0\x9f\x8c\xb4: \xf0\x9f\x91\x8d',
header_lines)
self.assertIn(b'X-Object-Meta-\xe2\x98\x85: \xe2\x98\x85',
header_lines)
self.assertEqual(b"oh say can you see by the dawns'\n", fd.read(33))
@unpatch_policies
def test_PUT_bad_message_length(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: close\r\n'
b'X-Storage-Token: t\r\n'
b'Content-Type: application/octet-stream\r\n'
b'Content-Length: 33\r\n'
b'Transfer-Encoding: gzip\r\n\r\n'
b'2\r\n'
b'oh\r\n'
b'4\r\n'
b' say\r\n'
b'4\r\n'
b' can\r\n'
b'4\r\n'
b' you\r\n'
b'4\r\n'
b' see\r\n'
b'3\r\n'
b' by\r\n'
b'4\r\n'
b' the\r\n'
b'8\r\n'
b' dawns\'\n\r\n'
b'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 400'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_unsup_xfr_encoding(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: close\r\n'
b'X-Storage-Token: t\r\n'
b'Content-Type: application/octet-stream\r\n'
b'Content-Length: 33\r\n'
b'Transfer-Encoding: gzip,chunked\r\n\r\n'
b'2\r\n'
b'oh\r\n'
b'4\r\n'
b' say\r\n'
b'4\r\n'
b' can\r\n'
b'4\r\n'
b' you\r\n'
b'4\r\n'
b' see\r\n'
b'3\r\n'
b' by\r\n'
b'4\r\n'
b' the\r\n'
b'8\r\n'
b' dawns\'\n\r\n'
b'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 501'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_too_large(self):
with mock.patch('swift.common.constraints.MAX_FILE_SIZE', 10):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: close\r\n'
b'X-Storage-Token: t\r\n'
b'Content-Type: application/octet-stream\r\n'
b'Content-Length: 33\r\n\r\n'
b'oh say can you see by the dawns\'\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 413'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_POST_last_modified(self):
prolis = _test_sockets[0]
def _do_HEAD():
# do a HEAD to get reported last modified time
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'HEAD /v1/a/c/o.last_modified HTTP/1.1\r\n'
b'Host: localhost\r\nConnection: close\r\n'
b'X-Storage-Token: t\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
last_modified_head = [line for line in headers.split(b'\r\n')
if lm_hdr in line][0][len(lm_hdr):]
return last_modified_head
def _do_conditional_GET_checks(last_modified_time):
# check If-(Un)Modified-Since GETs
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/c/o.last_modified HTTP/1.1\r\n'
b'Host: localhost\r\nConnection: close\r\n'
b'If-Modified-Since: %s\r\n'
b'X-Storage-Token: t\r\n\r\n' % last_modified_time)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 304'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/c/o.last_modified HTTP/1.1\r\n'
b'Host: localhost\r\nConnection: close\r\n'
b'If-Unmodified-Since: %s\r\n'
b'X-Storage-Token: t\r\n\r\n' % last_modified_time)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
# PUT the object
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/c/o.last_modified HTTP/1.1\r\n'
b'Host: localhost\r\nConnection: close\r\n'
b'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
lm_hdr = b'Last-Modified: '
self.assertEqual(headers[:len(exp)], exp)
last_modified_put = [line for line in headers.split(b'\r\n')
if lm_hdr in line][0][len(lm_hdr):]
last_modified_head = _do_HEAD()
self.assertEqual(last_modified_put, last_modified_head)
_do_conditional_GET_checks(last_modified_put)
# now POST to the object
# last-modified is rounded to the second, so sleep a second to ensure it increments
sleep(1)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'POST /v1/a/c/o.last_modified HTTP/1.1\r\n'
b'Host: localhost\r\nConnection: close\r\n'
b'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 202'
self.assertEqual(headers[:len(exp)], exp)
for line in headers.split(b'\r\n'):
self.assertFalse(line.startswith(lm_hdr))
# last modified time will have changed due to POST
last_modified_head = _do_HEAD()
self.assertNotEqual(last_modified_put, last_modified_head)
_do_conditional_GET_checks(last_modified_head)
@unpatch_policies
def test_PUT_auto_content_type(self):
prolis = _test_sockets[0]
def do_test(ext, content_type):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/c/o.%s HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n' %
ext.encode())
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
fd.write(b'GET /v1/a/c/o.%s HTTP/1.1\r\n'
b'Host: localhost\r\nConnection: close\r\n'
b'X-Storage-Token: t\r\n\r\n' % ext.encode())
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertIn(b'Content-Type: %s' % content_type.encode(),
headers.split(b'\r\n'))
sock.close()
do_test('jpg', 'image/jpeg')
do_test('html', 'text/html')
do_test('css', 'text/css')
def test_custom_mime_types_files(self):
swift_dir = mkdtemp()
try:
with open(os.path.join(swift_dir, 'mime.types'), 'w') as fp:
fp.write('foo/bar foo\n')
proxy_server.Application({'swift_dir': swift_dir},
debug_logger(),
FakeRing(), FakeRing())
self.assertEqual(proxy_server.mimetypes.guess_type('blah.foo')[0],
'foo/bar')
self.assertEqual(proxy_server.mimetypes.guess_type('blah.jpg')[0],
'image/jpeg')
finally:
rmtree(swift_dir, ignore_errors=True)
def test_PUT(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, 201, 201), 201)
test_status_map((200, 200, 201, 201, 500), 201)
test_status_map((200, 200, 204, 404, 404), 404)
test_status_map((200, 200, 204, 500, 404), 503)
test_status_map((200, 200, 202, 202, 204), 204)
def test_PUT_connect_exceptions(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
try:
res = controller.PUT(req)
except HTTPException as res: # noqa: F841
pass
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, 201, -1), 201) # connect exc
# connect errors
test_status_map((200, 200, Timeout(), 201, 201, ), 201)
test_status_map((200, 200, 201, 201, Exception()), 201)
# expect errors
test_status_map((200, 200, (Timeout(), None), 201, 201), 201)
test_status_map((200, 200, (Exception(), None), 201, 201), 201)
# response errors
test_status_map((200, 200, (100, Timeout()), 201, 201), 201)
test_status_map((200, 200, (100, Exception()), 201, 201), 201)
test_status_map((200, 200, 507, 201, 201), 201) # error limited
test_status_map((200, 200, -1, 201, -1), 503)
test_status_map((200, 200, 503, -1, 503), 503)
def test_PUT_send_exceptions(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg',
environ={'REQUEST_METHOD': 'PUT'},
body='some data')
self.app.update_request(req)
try:
res = controller.PUT(req)
except HTTPException as res: # noqa: F841
pass
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, -1, 201), 201)
test_status_map((200, 200, 201, -1, -1), 503)
test_status_map((200, 200, 503, 503, -1), 503)
def test_PUT_max_size(self):
with save_globals():
set_http_connect(201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
'Content-Type': 'foo/bar'})
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int, 413)
def test_PUT_bad_content_type(self):
with save_globals():
set_http_connect(201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
'Content-Length': 0, 'Content-Type': 'foo/bar;swift_hey=45'})
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int, 400)
def test_PUT_getresponse_exceptions(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
try:
res = controller.PUT(req)
except HTTPException as res: # noqa: F841
pass
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, 201, -1), 201)
test_status_map((200, 200, 201, -1, -1), 503)
test_status_map((200, 200, 503, 503, -1), 503)
def test_POST(self):
with save_globals():
def test_status_map(statuses, expected):
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'Content-Type': 'foo/bar'})
self.app.update_request(req)
res = req.get_response(self.app)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 202, 202, 202), 202)
test_status_map((200, 200, 202, 202, 500), 202)
test_status_map((200, 200, 202, 500, 500), 503)
test_status_map((200, 200, 202, 404, 500), 503)
test_status_map((200, 200, 202, 404, 404), 404)
test_status_map((200, 200, 404, 500, 500), 503)
test_status_map((200, 200, 404, 404, 404), 404)
@patch_policies([
StoragePolicy(0, 'zero', is_default=True, object_ring=FakeRing()),
StoragePolicy(1, 'one', object_ring=FakeRing()),
])
def test_POST_backend_headers(self):
# reset the router post patch_policies
self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
self.app.sort_nodes = lambda nodes, *args, **kwargs: nodes
def do_test(resp_headers):
backend_requests = []
def capture_requests(ip, port, method, path, headers, *args,
**kwargs):
backend_requests.append((method, path, headers))
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'X-Object-Meta-Color': 'Blue',
'Content-Type': 'text/plain'})
# we want the container_info response to say a policy index of 1
with mocked_http_conn(
200, 200, 202, 202, 202,
headers=resp_headers, give_connect=capture_requests
) as fake_conn:
resp = req.get_response(self.app)
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 202)
self.assertEqual(len(backend_requests), 5)
def check_request(req, method, path, headers=None):
req_method, req_path, req_headers = req
self.assertEqual(method, req_method)
# caller can ignore leading path parts
self.assertTrue(req_path.endswith(path),
'expected path to end with %s, it was %s' % (
path, req_path))
headers = headers or {}
# caller can ignore some headers
for k, v in headers.items():
self.assertEqual(req_headers[k], v)
self.assertNotIn('X-Backend-Container-Path', req_headers)
account_request = backend_requests.pop(0)
check_request(account_request, method='HEAD', path='/sda/0/a')
container_request = backend_requests.pop(0)
check_request(container_request, method='HEAD', path='/sda/0/a/c')
# make sure backend requests included expected container headers
container_headers = {}
for request in backend_requests:
req_headers = request[2]
device = req_headers['x-container-device']
host = req_headers['x-container-host']
container_headers[device] = host
expectations = {
'method': 'POST',
'path': '/0/a/c/o',
'headers': {
'X-Container-Partition': '0',
'Connection': 'close',
'User-Agent': 'proxy-server %s' % os.getpid(),
'Host': 'localhost:80',
'Referer': 'POST http://localhost/v1/a/c/o',
'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': '1'
},
}
check_request(request, **expectations)
expected = {}
for i, device in enumerate(['sda', 'sdb', 'sdc']):
expected[device] = '10.0.0.%d:100%d' % (i, i)
self.assertEqual(container_headers, expected)
# and again with policy override
backend_requests = []
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'X-Object-Meta-Color': 'Blue',
'Content-Type': 'text/plain',
'X-Backend-Storage-Policy-Index': 0})
with mocked_http_conn(
200, 200, 202, 202, 202,
headers=resp_headers, give_connect=capture_requests
) as fake_conn:
resp = req.get_response(self.app)
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 202)
self.assertEqual(len(backend_requests), 5)
for request in backend_requests[2:]:
expectations = {
'method': 'POST',
'path': '/0/a/c/o', # ignore device bit
'headers': {
'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': '0',
}
}
check_request(request, **expectations)
resp_headers = {'X-Backend-Storage-Policy-Index': 1}
do_test(resp_headers)
resp_headers['X-Backend-Sharding-State'] = 'unsharded'
do_test(resp_headers)
def _check_request(self, req, method, path, headers=None, params=None):
self.assertEqual(method, req['method'])
# caller can ignore leading path parts
self.assertTrue(req['path'].endswith(path),
'expected path to end with %s, it was %s' % (
path, req['path']))
headers = headers or {}
# caller can ignore some headers
for k, v in headers.items():
self.assertEqual(req['headers'][k], v,
'Expected %s but got %s for key %s' %
(v, req['headers'][k], k))
params = params or {}
req_params = dict(parse_qsl(req['qs'])) if req['qs'] else {}
for k, v in params.items():
self.assertEqual(req_params[k], v,
'Expected %s but got %s for key %s' %
(v, req_params[k], k))
@patch_policies([
StoragePolicy(0, 'zero', is_default=True, object_ring=FakeRing()),
StoragePolicy(1, 'one', object_ring=FakeRing()),
])
def test_backend_headers_update_shard_container_no_cache(self):
        # verify that when the container is sharded, the backend container
        # update is directed to the shard container
# reset the router post patch_policies
self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
self.app.sort_nodes = lambda nodes, *args, **kwargs: nodes
self.app.recheck_updating_shard_ranges = 0
def do_test(method, sharding_state):
self.app.logger.clear() # clean capture state
req = Request.blank('/v1/a/c/o', {}, method=method, body='',
headers={'Content-Type': 'text/plain'})
# we want the container_info response to say policy index of 1 and
# sharding state
# acc HEAD, cont HEAD, cont shard GET, obj POSTs
status_codes = (200, 200, 200, 202, 202, 202)
resp_headers = {'X-Backend-Storage-Policy-Index': 1,
'x-backend-sharding-state': sharding_state,
'X-Backend-Record-Type': 'shard'}
shard_range = utils.ShardRange(
'.shards_a/c_shard', utils.Timestamp.now(), 'l', 'u')
body = json.dumps([dict(shard_range)]).encode('ascii')
with mocked_http_conn(*status_codes, headers=resp_headers,
body=body) as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
stats = self.app.logger.statsd_client.get_increment_counts()
self.assertEqual(
{'account.info.cache.disabled.200': 1,
'account.info.infocache.hit': 2,
'container.info.cache.disabled.200': 1,
'container.info.infocache.hit': 1,
'object.shard_updating.cache.disabled.200': 1},
stats)
backend_requests = fake_conn.requests
# verify statsd prefix is not mutated
self.assertEqual([], self.app.logger.log_dict['set_statsd_prefix'])
account_request = backend_requests[0]
self._check_request(
account_request, method='HEAD', path='/sda/0/a')
container_request = backend_requests[1]
self._check_request(
container_request, method='HEAD', path='/sda/0/a/c')
container_request_shard = backend_requests[2]
self._check_request(
container_request_shard, method='GET', path='/sda/0/a/c',
params={'includes': 'o', 'states': 'updating'},
headers={'X-Backend-Record-Type': 'shard'})
# make sure backend requests included expected container headers
container_headers = {}
for request in backend_requests[3:]:
req_headers = request['headers']
device = req_headers['x-container-device']
container_headers[device] = req_headers['x-container-host']
expectations = {
'method': method,
'path': '/0/a/c/o',
'headers': {
'X-Container-Partition': '0',
'Host': 'localhost:80',
'Referer': '%s http://localhost/v1/a/c/o' % method,
'X-Backend-Storage-Policy-Index': '1',
'X-Backend-Quoted-Container-Path': shard_range.name
},
}
self._check_request(request, **expectations)
expected = {}
for i, device in enumerate(['sda', 'sdb', 'sdc']):
expected[device] = '10.0.0.%d:100%d' % (i, i)
self.assertEqual(container_headers, expected)
do_test('POST', 'sharding')
do_test('POST', 'sharded')
do_test('DELETE', 'sharding')
do_test('DELETE', 'sharded')
do_test('PUT', 'sharding')
do_test('PUT', 'sharded')
@patch_policies([
StoragePolicy(0, 'zero', is_default=True, object_ring=FakeRing()),
StoragePolicy(1, 'one', object_ring=FakeRing()),
])
def test_backend_headers_update_shard_container_with_empty_cache(self):
        # verify that when the container is sharded, the backend container
        # update is directed to the shard container
# reset the router post patch_policies
self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
self.app.sort_nodes = lambda nodes, *args, **kwargs: nodes
self.app.recheck_updating_shard_ranges = 3600
def do_test(method, sharding_state):
self.app.logger.clear() # clean capture state
req = Request.blank(
'/v1/a/c/o', {'swift.cache': FakeMemcache()},
method=method, body='', headers={'Content-Type': 'text/plain'})
# we want the container_info response to say policy index of 1 and
# sharding state
# acc HEAD, cont HEAD, cont shard GET, obj POSTs
status_codes = (200, 200, 200, 202, 202, 202)
resp_headers = {'X-Backend-Storage-Policy-Index': 1,
'x-backend-sharding-state': sharding_state,
'X-Backend-Record-Type': 'shard'}
shard_ranges = [
utils.ShardRange(
'.shards_a/c_not_used', utils.Timestamp.now(), '', 'l'),
utils.ShardRange(
'.shards_a/c_shard', utils.Timestamp.now(), 'l', 'u'),
utils.ShardRange(
'.shards_a/c_nope', utils.Timestamp.now(), 'u', ''),
]
body = json.dumps([
dict(shard_range)
for shard_range in shard_ranges]).encode('ascii')
with mocked_http_conn(*status_codes, headers=resp_headers,
body=body) as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
stats = self.app.logger.statsd_client.get_increment_counts()
self.assertEqual({'account.info.cache.miss.200': 1,
'account.info.infocache.hit': 2,
'container.info.cache.miss.200': 1,
'container.info.infocache.hit': 1,
'object.shard_updating.cache.miss.200': 1},
stats)
self.assertEqual([], self.app.logger.log_dict['set_statsd_prefix'])
info_lines = self.logger.get_lines_for_level('info')
self.assertIn(
'Caching updating shards for shard-updating-v2/a/c (3 shards)',
info_lines)
backend_requests = fake_conn.requests
account_request = backend_requests[0]
self._check_request(
account_request, method='HEAD', path='/sda/0/a')
container_request = backend_requests[1]
self._check_request(
container_request, method='HEAD', path='/sda/0/a/c')
container_request_shard = backend_requests[2]
self._check_request(
container_request_shard, method='GET', path='/sda/0/a/c',
params={'states': 'updating'},
headers={'X-Backend-Record-Type': 'shard'})
cache_key = 'shard-updating-v2/a/c'
self.assertIn(cache_key, req.environ['swift.cache'].store)
cached_namespaces = NamespaceBoundList.parse(shard_ranges)
self.assertEqual(
req.environ['swift.cache'].store[cache_key],
cached_namespaces.bounds)
self.assertIn(cache_key, req.environ.get('swift.infocache'))
self.assertEqual(
req.environ['swift.infocache'][cache_key].bounds,
cached_namespaces.bounds)
# make sure backend requests included expected container headers
container_headers = {}
for request in backend_requests[3:]:
req_headers = request['headers']
device = req_headers['x-container-device']
container_headers[device] = req_headers['x-container-host']
expectations = {
'method': method,
'path': '/0/a/c/o',
'headers': {
'X-Container-Partition': '0',
'Host': 'localhost:80',
'Referer': '%s http://localhost/v1/a/c/o' % method,
'X-Backend-Storage-Policy-Index': '1',
'X-Backend-Quoted-Container-Path': shard_ranges[1].name
},
}
self._check_request(request, **expectations)
expected = {}
for i, device in enumerate(['sda', 'sdb', 'sdc']):
expected[device] = '10.0.0.%d:100%d' % (i, i)
self.assertEqual(container_headers, expected)
do_test('POST', 'sharding')
do_test('POST', 'sharded')
do_test('DELETE', 'sharding')
do_test('DELETE', 'sharded')
do_test('PUT', 'sharding')
do_test('PUT', 'sharded')
@patch_policies([
StoragePolicy(0, 'zero', is_default=True, object_ring=FakeRing()),
StoragePolicy(1, 'one', object_ring=FakeRing()),
])
def test_backend_headers_update_shard_container_with_live_cache(self):
        # verify that when the container is sharded, the backend container
        # update is directed to the shard container
# reset the router post patch_policies
self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
self.app.sort_nodes = lambda nodes, *args, **kwargs: nodes
self.app.recheck_updating_shard_ranges = 3600
def do_test(method, sharding_state):
self.app.logger.clear() # clean capture state
shard_ranges = [
utils.ShardRange(
'.shards_a/c_not_used', utils.Timestamp.now(), '', 'l'),
utils.ShardRange(
'.shards_a/c_shard', utils.Timestamp.now(), 'l', 'u'),
utils.ShardRange(
'.shards_a/c_nope', utils.Timestamp.now(), 'u', ''),
]
cache = FakeMemcache()
cache.set(
'shard-updating-v2/a/c',
tuple(
[shard_range.lower_str, str(shard_range.name)]
for shard_range in shard_ranges))
req = Request.blank('/v1/a/c/o', {'swift.cache': cache},
method=method, body='',
headers={'Content-Type': 'text/plain'})
# we want the container_info response to say policy index of 1 and
# sharding state
# acc HEAD, cont HEAD, obj POSTs
status_codes = (200, 200, 202, 202, 202)
resp_headers = {'X-Backend-Storage-Policy-Index': 1,
'x-backend-sharding-state': sharding_state,
'X-Backend-Record-Type': 'shard'}
with mocked_http_conn(*status_codes,
headers=resp_headers) as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
stats = self.app.logger.statsd_client.get_increment_counts()
self.assertEqual({'account.info.cache.miss.200': 1,
'account.info.infocache.hit': 1,
'container.info.cache.miss.200': 1,
'container.info.infocache.hit': 1,
'object.shard_updating.cache.hit': 1}, stats)
# verify statsd prefix is not mutated
self.assertEqual([], self.app.logger.log_dict['set_statsd_prefix'])
backend_requests = fake_conn.requests
account_request = backend_requests[0]
self._check_request(
account_request, method='HEAD', path='/sda/0/a')
container_request = backend_requests[1]
self._check_request(
container_request, method='HEAD', path='/sda/0/a/c')
# infocache gets populated from memcache
cache_key = 'shard-updating-v2/a/c'
self.assertIn(cache_key, req.environ.get('swift.infocache'))
self.assertEqual(
req.environ['swift.infocache'][cache_key].bounds,
NamespaceBoundList.parse(shard_ranges).bounds)
# make sure backend requests included expected container headers
container_headers = {}
for request in backend_requests[2:]:
req_headers = request['headers']
device = req_headers['x-container-device']
container_headers[device] = req_headers['x-container-host']
expectations = {
'method': method,
'path': '/0/a/c/o',
'headers': {
'X-Container-Partition': '0',
'Host': 'localhost:80',
'Referer': '%s http://localhost/v1/a/c/o' % method,
'X-Backend-Storage-Policy-Index': '1',
'X-Backend-Quoted-Container-Path': shard_ranges[1].name
},
}
self._check_request(request, **expectations)
expected = {}
for i, device in enumerate(['sda', 'sdb', 'sdc']):
expected[device] = '10.0.0.%d:100%d' % (i, i)
self.assertEqual(container_headers, expected)
do_test('POST', 'sharding')
do_test('POST', 'sharded')
do_test('DELETE', 'sharding')
do_test('DELETE', 'sharded')
do_test('PUT', 'sharding')
do_test('PUT', 'sharded')
@patch_policies([
StoragePolicy(0, 'zero', is_default=True, object_ring=FakeRing()),
StoragePolicy(1, 'one', object_ring=FakeRing()),
])
def test_backend_headers_update_shard_container_with_live_infocache(self):
        # verify that when the container is sharded, the backend container
        # update is directed to the shard container
# reset the router post patch_policies
self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
self.app.sort_nodes = lambda nodes, *args, **kwargs: nodes
self.app.recheck_updating_shard_ranges = 3600
def do_test(method, sharding_state):
self.app.logger.clear() # clean capture state
shard_ranges = [
utils.ShardRange(
'.shards_a/c_not_used', utils.Timestamp.now(), '', 'l'),
utils.ShardRange(
'.shards_a/c_shard', utils.Timestamp.now(), 'l', 'u'),
utils.ShardRange(
'.shards_a/c_nope', utils.Timestamp.now(), 'u', ''),
]
infocache = {
'shard-updating-v2/a/c':
NamespaceBoundList.parse(shard_ranges)}
req = Request.blank('/v1/a/c/o', {'swift.infocache': infocache},
method=method, body='',
headers={'Content-Type': 'text/plain'})
# we want the container_info response to say policy index of 1 and
# sharding state
# acc HEAD, cont HEAD, obj POSTs
status_codes = (200, 200, 202, 202, 202)
resp_headers = {'X-Backend-Storage-Policy-Index': 1,
'x-backend-sharding-state': sharding_state,
'X-Backend-Record-Type': 'shard'}
with mocked_http_conn(*status_codes,
headers=resp_headers) as fake_conn:
resp = req.get_response(self.app)
            # verify the request hit the infocache.
self.assertEqual(resp.status_int, 202)
stats = self.app.logger.statsd_client.get_increment_counts()
self.assertEqual({'account.info.cache.disabled.200': 1,
'account.info.infocache.hit': 1,
'container.info.cache.disabled.200': 1,
'container.info.infocache.hit': 1,
'object.shard_updating.infocache.hit': 1}, stats)
# verify statsd prefix is not mutated
self.assertEqual([], self.app.logger.log_dict['set_statsd_prefix'])
backend_requests = fake_conn.requests
account_request = backend_requests[0]
self._check_request(
account_request, method='HEAD', path='/sda/0/a')
container_request = backend_requests[1]
self._check_request(
container_request, method='HEAD', path='/sda/0/a/c')
# verify content in infocache.
cache_key = 'shard-updating-v2/a/c'
self.assertIn(cache_key, req.environ.get('swift.infocache'))
self.assertEqual(
req.environ['swift.infocache'][cache_key].bounds,
NamespaceBoundList.parse(shard_ranges).bounds)
# make sure backend requests included expected container headers
container_headers = {}
for request in backend_requests[2:]:
req_headers = request['headers']
device = req_headers['x-container-device']
container_headers[device] = req_headers['x-container-host']
expectations = {
'method': method,
'path': '/0/a/c/o',
'headers': {
'X-Container-Partition': '0',
'Host': 'localhost:80',
'Referer': '%s http://localhost/v1/a/c/o' % method,
'X-Backend-Storage-Policy-Index': '1',
'X-Backend-Quoted-Container-Path': shard_ranges[1].name
},
}
self._check_request(request, **expectations)
expected = {}
for i, device in enumerate(['sda', 'sdb', 'sdc']):
expected[device] = '10.0.0.%d:100%d' % (i, i)
self.assertEqual(container_headers, expected)
do_test('POST', 'sharding')
do_test('POST', 'sharded')
do_test('DELETE', 'sharding')
do_test('DELETE', 'sharded')
do_test('PUT', 'sharding')
do_test('PUT', 'sharded')
@patch_policies([
StoragePolicy(0, 'zero', is_default=True, object_ring=FakeRing()),
StoragePolicy(1, 'one', object_ring=FakeRing()),
])
def test_backend_headers_update_shard_container_can_skip_cache(self):
        # verify that when the container is sharded, the backend container
        # update is directed to the shard container
# reset the router post patch_policies
self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
self.app.sort_nodes = lambda nodes, *args, **kwargs: nodes
self.app.recheck_updating_shard_ranges = 3600
self.app.container_updating_shard_ranges_skip_cache = 0.001
def do_test(method, sharding_state):
self.app.logger.clear() # clean capture state
cached_shard_ranges = [
utils.ShardRange(
'.shards_a/c_nope', utils.Timestamp.now(), '', 'l'),
utils.ShardRange(
'.shards_a/c_uhn_uh', utils.Timestamp.now(), 'l', 'u'),
utils.ShardRange(
'.shards_a/c_no_way', utils.Timestamp.now(), 'u', ''),
]
cache = FakeMemcache()
cache.set('shard-updating-v2/a/c',
tuple(
[sr.lower_str, str(sr.name)]
for sr in cached_shard_ranges))
# sanity check: we can get the old shard from cache
req = Request.blank(
'/v1/a/c/o', {'swift.cache': cache},
method=method, body='', headers={'Content-Type': 'text/plain'})
# acc HEAD, cont HEAD, obj POSTs
# we want the container_info response to say policy index of 1 and
# sharding state
status_codes = (200, 200, 202, 202, 202)
resp_headers = {'X-Backend-Storage-Policy-Index': 1,
'x-backend-sharding-state': sharding_state,
'X-Backend-Record-Type': 'shard'}
with mock.patch('random.random', return_value=1), \
mocked_http_conn(*status_codes, headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
stats = self.app.logger.statsd_client.get_increment_counts()
self.assertEqual({'account.info.cache.miss.200': 1,
'account.info.infocache.hit': 1,
'container.info.cache.miss.200': 1,
'container.info.infocache.hit': 1,
'object.shard_updating.cache.hit': 1}, stats)
# cached shard ranges are still there
cache_key = 'shard-updating-v2/a/c'
self.assertIn(cache_key, req.environ['swift.cache'].store)
cached_namespaces = NamespaceBoundList.parse(cached_shard_ranges)
self.assertEqual(
req.environ['swift.cache'].store[cache_key],
cached_namespaces.bounds)
self.assertIn(cache_key, req.environ.get('swift.infocache'))
self.assertEqual(
req.environ['swift.infocache'][cache_key].bounds,
cached_namespaces.bounds)
# ...but we have some chance to skip cache
req = Request.blank(
'/v1/a/c/o', {'swift.cache': cache},
method=method, body='', headers={'Content-Type': 'text/plain'})
# cont shard GET, obj POSTs
status_codes = (200, 202, 202, 202)
resp_headers = {'X-Backend-Storage-Policy-Index': 1,
'x-backend-sharding-state': sharding_state,
'X-Backend-Record-Type': 'shard'}
shard_ranges = [
utils.ShardRange(
'.shards_a/c_not_used', utils.Timestamp.now(), '', 'l'),
utils.ShardRange(
'.shards_a/c_shard', utils.Timestamp.now(), 'l', 'u'),
utils.ShardRange(
'.shards_a/c_nope', utils.Timestamp.now(), 'u', ''),
]
body = json.dumps([
dict(shard_range)
for shard_range in shard_ranges]).encode('ascii')
with mock.patch('random.random', return_value=0), \
mocked_http_conn(*status_codes, headers=resp_headers,
body=body) as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
stats = self.app.logger.statsd_client.get_increment_counts()
self.assertEqual({'account.info.cache.miss.200': 1,
'account.info.infocache.hit': 1,
'container.info.cache.miss.200': 1,
'container.info.infocache.hit': 2,
'object.shard_updating.cache.hit': 1,
'container.info.cache.hit': 1,
'account.info.cache.hit': 1,
'object.shard_updating.cache.skip.200': 1},
stats)
# verify statsd prefix is not mutated
self.assertEqual([], self.app.logger.log_dict['set_statsd_prefix'])
backend_requests = fake_conn.requests
container_request_shard = backend_requests[0]
self._check_request(
container_request_shard, method='GET', path='/sda/0/a/c',
params={'states': 'updating'},
headers={'X-Backend-Record-Type': 'shard'})
# and skipping cache will refresh it
cache_key = 'shard-updating-v2/a/c'
self.assertIn(cache_key, req.environ['swift.cache'].store)
cached_namespaces = NamespaceBoundList.parse(shard_ranges)
self.assertEqual(
req.environ['swift.cache'].store[cache_key],
cached_namespaces.bounds)
self.assertIn(cache_key, req.environ.get('swift.infocache'))
self.assertEqual(
req.environ['swift.infocache'][cache_key].bounds,
cached_namespaces.bounds)
# make sure backend requests included expected container headers
container_headers = {}
for request in backend_requests[1:]:
req_headers = request['headers']
device = req_headers['x-container-device']
container_headers[device] = req_headers['x-container-host']
expectations = {
'method': method,
'path': '/0/a/c/o',
'headers': {
'X-Container-Partition': '0',
'Host': 'localhost:80',
'Referer': '%s http://localhost/v1/a/c/o' % method,
'X-Backend-Storage-Policy-Index': '1',
'X-Backend-Quoted-Container-Path': shard_ranges[1].name
},
}
self._check_request(request, **expectations)
expected = {}
for i, device in enumerate(['sda', 'sdb', 'sdc']):
expected[device] = '10.0.0.%d:100%d' % (i, i)
self.assertEqual(container_headers, expected)
# shard lookup in memcache may error...
req = Request.blank(
'/v1/a/c/o', {'swift.cache': cache},
method=method, body='', headers={'Content-Type': 'text/plain'})
cache.error_on_get = [False, True]
with mock.patch('random.random', return_value=1.0), \
mocked_http_conn(*status_codes, headers=resp_headers,
body=body):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
stats = self.app.logger.statsd_client.get_increment_counts()
self.assertEqual(stats, {
'account.info.cache.hit': 2,
'account.info.cache.miss.200': 1,
'account.info.infocache.hit': 1,
'container.info.cache.hit': 2,
'container.info.cache.miss.200': 1,
'container.info.infocache.hit': 3,
'object.shard_updating.cache.skip.200': 1,
'object.shard_updating.cache.hit': 1,
'object.shard_updating.cache.error.200': 1})
do_test('POST', 'sharding')
do_test('POST', 'sharded')
do_test('DELETE', 'sharding')
do_test('DELETE', 'sharded')
do_test('PUT', 'sharding')
do_test('PUT', 'sharded')
@patch_policies([
StoragePolicy(0, 'zero', is_default=True, object_ring=FakeRing()),
StoragePolicy(1, 'one', object_ring=FakeRing()),
])
def test_backend_headers_update_shard_container_errors(self):
        # verify that the update target reverts to the root container if
        # fetching shard ranges fails
# reset the router post patch_policies
self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
self.app.sort_nodes = lambda nodes, *args, **kwargs: nodes
self.app.recheck_updating_shard_ranges = 0
def do_test(method, sharding_state):
self.app.logger.clear() # clean capture state
req = Request.blank('/v1/a/c/o', {}, method=method, body='',
headers={'Content-Type': 'text/plain'})
# we want the container_info response to say policy index of 1 and
# sharding state, but we want shard range listings to fail
# acc HEAD, cont HEAD, cont shard GETs, obj POSTs
status_codes = (200, 200, 404, 404, 404, 202, 202, 202)
resp_headers = {'X-Backend-Storage-Policy-Index': 1,
'x-backend-sharding-state': sharding_state}
with mocked_http_conn(*status_codes,
headers=resp_headers) as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
stats = self.app.logger.statsd_client.get_increment_counts()
self.assertEqual(
{'account.info.cache.disabled.200': 1,
'account.info.infocache.hit': 2,
'container.info.cache.disabled.200': 1,
'container.info.infocache.hit': 1,
'object.shard_updating.cache.disabled.404': 1},
stats)
backend_requests = fake_conn.requests
account_request = backend_requests[0]
self._check_request(
account_request, method='HEAD', path='/sda/0/a')
container_request = backend_requests[1]
self._check_request(
container_request, method='HEAD', path='/sda/0/a/c')
container_request_shard = backend_requests[2]
self._check_request(
container_request_shard, method='GET', path='/sda/0/a/c',
params={'includes': 'o', 'states': 'updating'},
headers={'X-Backend-Record-Type': 'shard'})
# infocache does not get populated from memcache
cache_key = 'shard-updating-v2/a/c'
self.assertNotIn(cache_key, req.environ.get('swift.infocache'))
# make sure backend requests included expected container headers
container_headers = {}
for request in backend_requests[5:]:
req_headers = request['headers']
device = req_headers['x-container-device']
container_headers[device] = req_headers['x-container-host']
expectations = {
'method': method,
'path': '/0/a/c/o',
'headers': {
'X-Container-Partition': '0',
'Host': 'localhost:80',
'Referer': '%s http://localhost/v1/a/c/o' % method,
'X-Backend-Storage-Policy-Index': '1',
# X-Backend-Quoted-Container-Path is not sent
},
}
self._check_request(request, **expectations)
expected = {}
for i, device in enumerate(['sda', 'sdb', 'sdc']):
expected[device] = '10.0.0.%d:100%d' % (i, i)
self.assertEqual(container_headers, expected)
do_test('POST', 'sharding')
do_test('POST', 'sharded')
do_test('DELETE', 'sharding')
do_test('DELETE', 'sharded')
do_test('PUT', 'sharding')
do_test('PUT', 'sharded')
def test_DELETE(self):
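        # object DELETE status is the best response from the object servers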
with save_globals():
def test_status_map(statuses, expected):
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'DELETE'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
test_status_map((200, 200, 204, 204, 204), 204)
test_status_map((200, 200, 204, 204, 500), 204)
test_status_map((200, 200, 204, 404, 404), 404)
test_status_map((200, 204, 500, 500, 404), 503)
test_status_map((200, 200, 404, 404, 404), 404)
test_status_map((200, 200, 400, 400, 400), 400)
def test_HEAD(self):
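        # successful HEADs surface x-works and accept-ranges from the backend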
with save_globals():
def test_status_map(statuses, expected):
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
if expected < 400:
self.assertIn('x-works', res.headers)
self.assertEqual(res.headers['x-works'], 'yes')
self.assertIn('accept-ranges', res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
test_status_map((200, 200, 200, 404, 404), 200)
test_status_map((200, 200, 200, 500, 404), 200)
test_status_map((200, 200, 304, 500, 404), 304)
test_status_map((200, 200, 404, 404, 404), 404)
test_status_map((200, 200, 404, 404, 500), 404)
test_status_map((200, 200, 500, 500, 500), 503)
POLICIES.default.object_ring.max_more_nodes = 3
test_status_map(
(200, 200,
Timeout(), Timeout(), Timeout(), # Can't reach primaries
404, 404, 404), # No surprise: handoffs know nothing
503)
def test_HEAD_newest(self):
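        # with X-Newest the proxy returns the copy with the newest timestamp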
with save_globals():
def test_status_map(statuses, expected, timestamps,
expected_timestamp):
set_http_connect(*statuses, timestamps=timestamps)
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'},
headers={'x-newest': 'true'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
self.assertEqual(res.headers.get('last-modified'),
expected_timestamp)
# acct cont obj obj obj
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'2', '3'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '2'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '3',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, None), None)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, '1'), '1')
test_status_map((200, 200, 404, 404, 200), 200, ('0', '0', None,
None, '1'), '1')
def test_GET_newest(self):
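        # X-Newest GETs return the newest copy; plain HEADs take the first one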
with save_globals():
def test_status_map(statuses, expected, timestamps,
expected_timestamp):
set_http_connect(*statuses, timestamps=timestamps)
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'GET'},
headers={'x-newest': 'true'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
self.assertEqual(res.headers.get('last-modified'),
expected_timestamp)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'2', '3'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '2'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '3',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, None), None)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, '1'), '1')
with save_globals():
def test_status_map(statuses, expected, timestamps,
expected_timestamp):
set_http_connect(*statuses, timestamps=timestamps)
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
self.assertEqual(res.headers.get('last-modified'),
expected_timestamp)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'2', '3'), '1')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '2'), '1')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '1'), '1')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '3',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
'1', '2'), None)
def test_POST_meta_val_len(self):
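        # metadata values longer than MAX_META_VALUE_LENGTH are rejected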
with save_globals():
limit = constraints.MAX_META_VALUE_LENGTH
ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * limit})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * (limit + 1)})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_meta_authorize(self):
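        # constraints are enforced even when swift.authorize mutates headers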
def authorize(req):
req.headers['X-Object-Meta-Foo'] = 'x' * (limit + 1)
return
with save_globals():
limit = constraints.MAX_META_VALUE_LENGTH
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = controller.POST(req)
self.assertEqual(res.status_int, 400)
def test_POST_meta_key_len(self):
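        # metadata names longer than MAX_META_NAME_LENGTH are rejected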
with save_globals():
limit = constraints.MAX_META_NAME_LENGTH
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * limit): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * (limit + 1)): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_meta_count(self):
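        # more than MAX_META_COUNT metadata items is rejected with 400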
with save_globals():
limit = constraints.MAX_META_COUNT
headers = dict(
(('X-Object-Meta-' + str(i), 'a') for i in range(limit + 1)))
headers.update({'Content-Type': 'foo/bar'})
set_http_connect(202, 202, 202)
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers=headers)
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_meta_size(self):
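        # metadata totalling more than MAX_META_OVERALL_SIZE is rejected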
with save_globals():
limit = constraints.MAX_META_OVERALL_SIZE
count = limit // 256 # enough to cause the limit to be reached
headers = dict(
(('X-Object-Meta-' + str(i), 'a' * 256)
for i in range(count + 1)))
headers.update({'Content-Type': 'foo/bar'})
set_http_connect(202, 202, 202)
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers=headers)
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_PUT_not_autodetect_content_type(self):
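        # without X-Detect-Content-Type the client Content-Type is preserved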
with save_globals():
headers = {'Content-Type': 'something/right', 'Content-Length': 0}
it_worked = []
def verify_content_type(ipaddr, port, device, partition,
method, path, headers=None,
query_string=None):
if path == '/a/c/o.html':
it_worked.append(
headers['Content-Type'].startswith('something/right'))
set_http_connect(204, 204, 201, 201, 201,
give_connect=verify_content_type)
req = Request.blank('/v1/a/c/o.html', {'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
req.get_response(self.app)
self.assertNotEqual(it_worked, [])
self.assertTrue(all(it_worked))
def test_PUT_autodetect_content_type(self):
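        # X-Detect-Content-Type guesses the type from the object name suffix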
with save_globals():
headers = {'Content-Type': 'something/wrong', 'Content-Length': 0,
'X-Detect-Content-Type': 'True'}
it_worked = []
def verify_content_type(ipaddr, port, device, partition,
method, path, headers=None,
query_string=None):
if path == '/a/c/o.html':
it_worked.append(
headers['Content-Type'].startswith('text/html'))
set_http_connect(204, 204, 201, 201, 201,
give_connect=verify_content_type)
req = Request.blank('/v1/a/c/o.html', {'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
req.get_response(self.app)
self.assertNotEqual(it_worked, [])
self.assertTrue(all(it_worked))
def test_client_timeout(self):
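        # a client sending its body too slowly gets 408 after client_timeout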
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
class SlowBody(object):
def __init__(self):
self.sent = 0
def read(self, size=-1):
if self.sent < 4:
sleep(0.1)
self.sent += 1
return b' '
return b''
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': FakeMemcache(),
'wsgi.input': SlowBody()},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
self.app.client_timeout = 0.05
req.environ['wsgi.input'] = SlowBody() # Need a fresh instance
self.app.update_request(req)
set_http_connect(201, 201, 201)
# obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 408)
def test_client_disconnect(self):
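        # a short body with a Content-Length is treated as a disconnect (499)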
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
class DisconnectedBody(object):
def __init__(self):
self.sent = 0
def read(self, size=-1):
return b''
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': DisconnectedBody()},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
# chunked transfers basically go "until I stop sending bytes"
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': DisconnectedBody()},
headers={'Transfer-Encoding': 'chunked',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201) # ... so, no disconnect
# chunked transfer trumps content-length
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': DisconnectedBody()},
headers={'Content-Length': '4',
'Transfer-Encoding': 'chunked',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_node_read_timeout(self):
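        # reads slower than recoverable_node_timeout raise ChunkReadTimeout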
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
set_http_connect(200, 200, 200, slow=0.1)
req.sent_size = 0
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertFalse(got_exc)
self.app.recoverable_node_timeout = 0.1
set_http_connect(200, 200, 200, slow=1.0)
resp = req.get_response(self.app)
with self.assertRaises(ChunkReadTimeout):
resp.body
def test_node_read_timeout_retry(self):
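        # read timeouts retry other nodes while a matching-etag copy remains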
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o', environ={
'REQUEST_METHOD': 'GET', 'swift.cache': FakeMemcache()})
self.app.update_request(req)
self.app.recoverable_node_timeout = 0.1
set_http_connect(200, 200, 200, slow=[1.0, 1.0, 1.0])
resp = req.get_response(self.app)
with self.assertRaises(ChunkReadTimeout):
resp.body
set_http_connect(200, 200, 200, body=b'lalala',
slow=[1.0, 1.0])
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'lalala')
set_http_connect(200, 200, 200, body=b'lalala',
slow=[1.0, 1.0], etags=['a', 'a', 'a'])
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'lalala')
set_http_connect(200, 200, 200, body=b'lalala',
slow=[1.0, 1.0], etags=['a', 'b', 'a'])
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'lalala')
set_http_connect(200, 200, 200, body=b'lalala',
slow=[1.0, 1.0], etags=['a', 'b', 'b'])
resp = req.get_response(self.app)
with self.assertRaises(ChunkReadTimeout):
resp.body
def test_node_write_timeout(self):
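        # writes slower than node_timeout cause the PUT to fail with 503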
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'},
body=' ')
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201, slow=0.1)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
self.app.node_timeout = 0.1
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'},
body=' ')
self.app.update_request(req)
set_http_connect(201, 201, 201, slow=1.0)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_node_request_setting(self):
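        # request_node_count accepts an integer or a '<N> * replicas' string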
# default is 2 * replicas
baseapp = proxy_server.Application({},
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertEqual(6, baseapp.request_node_count(3))
def do_test(value, replicas, expected):
baseapp = proxy_server.Application({'request_node_count': value},
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertEqual(expected, baseapp.request_node_count(replicas))
do_test('3', 4, 3)
do_test('1 * replicas', 4, 4)
do_test('2 * replicas', 4, 8)
do_test('4', 4, 4)
do_test('5', 4, 5)
for bad in ('1.1', 1.1, 'auto', 'bad',
'2.5 * replicas', 'two * replicas'):
with self.assertRaises(ValueError):
proxy_server.Application({'request_node_count': bad},
container_ring=FakeRing(),
account_ring=FakeRing())
def test_iter_nodes(self):
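        # NodeIter yields primaries then handoffs, up to request_node_count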
with save_globals():
try:
object_ring = self.app.get_object_ring(None)
object_ring.max_more_nodes = 2
partition, nodes = object_ring.get_nodes('account',
'container',
'object')
collected_nodes = []
for node in proxy_base.NodeIter(
self.app, object_ring, partition, self.logger,
request=Request.blank('')):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 5)
object_ring.max_more_nodes = 6
self.app.request_node_count = lambda r: 20
partition, nodes = object_ring.get_nodes('account',
'container',
'object')
collected_nodes = []
for node in proxy_base.NodeIter(
self.app, object_ring, partition, self.logger,
request=Request.blank('')):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 9)
# zero error-limited primary nodes -> no handoff warnings
self.app.log_handoffs = True
self.app.logger.clear() # clean capture state
self.app.request_node_count = lambda r: 7
object_ring.max_more_nodes = 20
partition, nodes = object_ring.get_nodes('account',
'container',
'object')
collected_nodes = []
for node in proxy_base.NodeIter(
self.app, object_ring, partition, self.logger,
request=Request.blank('')):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 7)
self.assertEqual(self.app.logger.log_dict['warning'], [])
self.assertEqual(
self.app.logger.statsd_client.get_increments(), [])
# one error-limited primary node -> one handoff warning
self.app.log_handoffs = True
self.app.logger.clear() # clean capture state
self.app.request_node_count = lambda r: 7
self.app.error_limiter.stats.clear() # clear out errors
set_node_errors(self.app, object_ring._devs[0], 999,
last_error=(2 ** 63 - 1))
collected_nodes = []
for node in proxy_base.NodeIter(
self.app, object_ring, partition, self.logger,
request=Request.blank('')):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 7)
self.assertEqual(
self.app.logger.get_lines_for_level('warning'), [
'Handoff requested (5)'])
self.assertEqual(
self.app.logger.statsd_client.get_increments(),
['error_limiter.is_limited', 'handoff_count'])
# two error-limited primary nodes -> two handoff warnings
self.app.log_handoffs = True
self.app.logger.clear() # clean capture state
self.app.request_node_count = lambda r: 7
self.app.error_limiter.stats.clear() # clear out errors
for i in range(2):
set_node_errors(self.app, object_ring._devs[i], 999,
last_error=(2 ** 63 - 1))
collected_nodes = []
for node in proxy_base.NodeIter(
self.app, object_ring, partition, self.logger,
request=Request.blank('')):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 7)
self.assertEqual(
self.app.logger.get_lines_for_level('warning'), [
'Handoff requested (5)',
'Handoff requested (6)',
])
stats = self.app.logger.statsd_client.get_increment_counts()
self.assertEqual(2, stats.get('error_limiter.is_limited', 0))
self.assertEqual(2, stats.get('handoff_count', 0))
# all error-limited primary nodes -> four handoff warnings,
# plus a handoff-all metric
self.app.log_handoffs = True
self.app.logger.clear() # clean capture state
self.app.request_node_count = lambda r: 10
object_ring.set_replicas(4) # otherwise we run out of handoffs
self.app.error_limiter.stats.clear() # clear out errors
for i in range(4):
set_node_errors(self.app, object_ring._devs[i], 999,
last_error=(2 ** 63 - 1))
collected_nodes = []
for node in proxy_base.NodeIter(
self.app, object_ring, partition, self.logger,
request=Request.blank('')):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 10)
self.assertEqual(
self.app.logger.get_lines_for_level('warning'), [
'Handoff requested (7)',
'Handoff requested (8)',
'Handoff requested (9)',
'Handoff requested (10)',
])
stats = self.app.logger.statsd_client.get_increment_counts()
self.assertEqual(4, stats.get('error_limiter.is_limited', 0))
self.assertEqual(4, stats.get('handoff_count', 0))
self.assertEqual(1, stats.get('handoff_all_count', 0))
finally:
object_ring.max_more_nodes = 0
def test_iter_nodes_calls_sort_nodes(self):
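        # NodeIter passes the primary nodes through app.sort_nodes once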
called = []
def fake_sort_nodes(nodes, **kwargs):
            # the caller might mutate the list we return during iteration;
            # we're interested in its value as of call time
called.append(mock.call(list(nodes), **kwargs))
return nodes
with mock.patch.object(self.app, 'sort_nodes',
side_effect=fake_sort_nodes):
object_ring = self.app.get_object_ring(None)
for node in proxy_base.NodeIter(
self.app, object_ring, 0, self.logger,
request=Request.blank('')):
pass
self.assertEqual(called, [
mock.call(object_ring.get_part_nodes(0), policy=None)
])
def test_iter_nodes_skips_error_limited(self):
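        # error-limited nodes are skipped on subsequent NodeIter passes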
with mock.patch.object(self.app, 'sort_nodes',
lambda n, *args, **kwargs: n):
object_ring = self.app.get_object_ring(None)
first_nodes = list(proxy_base.NodeIter(
self.app, object_ring, 0, self.logger,
request=Request.blank('')))
second_nodes = list(proxy_base.NodeIter(
self.app, object_ring, 0, self.logger,
request=Request.blank('')))
self.assertIn(first_nodes[0], second_nodes)
self.assertEqual(
0, self.logger.statsd_client.get_increment_counts().get(
'error_limiter.is_limited', 0))
self.assertEqual(
0, self.logger.statsd_client.get_increment_counts().get(
'error_limiter.forced_limit', 0))
self.app.error_limit(first_nodes[0], 'test')
self.assertEqual(
1, self.logger.statsd_client.get_increment_counts().get(
'error_limiter.forced_limit', 0))
line = self.logger.get_lines_for_level('error')[-1]
self.assertEqual(
('Node will be error limited for 60.00s: %s, error: %s'
% (node_to_string(first_nodes[0]), 'test')), line)
second_nodes = list(proxy_base.NodeIter(
self.app, object_ring, 0, self.logger,
request=Request.blank('')))
self.assertNotIn(first_nodes[0], second_nodes)
self.assertEqual(
1, self.logger.statsd_client.get_increment_counts().get(
'error_limiter.is_limited', 0))
third_nodes = list(proxy_base.NodeIter(
self.app, object_ring, 0, self.logger,
request=Request.blank('')))
self.assertNotIn(first_nodes[0], third_nodes)
self.assertEqual(
2, self.logger.statsd_client.get_increment_counts().get(
'error_limiter.is_limited', 0))
def test_iter_nodes_gives_extra_if_error_limited_inline(self):
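        # error-limiting a node mid-iteration yields an extra handoff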
object_ring = self.app.get_object_ring(None)
with mock.patch.object(self.app, 'sort_nodes',
lambda n, *args, **kwargs: n), \
mock.patch.object(self.app, 'request_node_count',
lambda r: 6), \
mock.patch.object(object_ring, 'max_more_nodes', 99):
first_nodes = list(proxy_base.NodeIter(
self.app, object_ring, 0, self.logger,
request=Request.blank('')))
second_nodes = []
for node in proxy_base.NodeIter(
self.app, object_ring, 0, self.logger,
request=Request.blank('')):
if not second_nodes:
self.app.error_limit(node, 'test')
second_nodes.append(node)
self.assertEqual(len(first_nodes), 6)
self.assertEqual(len(second_nodes), 7)
def test_iter_nodes_without_replication_network(self):
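        # nodes default to use_replication=False without the backend header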
object_ring = self.app.get_object_ring(None)
node_list = [dict(id=n, ip='1.2.3.4', port=n, device='D',
use_replication=False)
for n in range(10)]
expected = [dict(n) for n in node_list]
with mock.patch.object(self.app, 'sort_nodes',
lambda n, *args, **kwargs: n), \
mock.patch.object(self.app, 'request_node_count',
lambda r: 3):
got_nodes = list(proxy_base.NodeIter(
self.app, object_ring, 0, self.logger, Request.blank(''),
node_iter=iter(node_list)))
self.assertEqual(expected[:3], got_nodes)
req = Request.blank('/v1/a/c')
node_list = [dict(id=n, ip='1.2.3.4', port=n, device='D')
for n in range(10)]
with mock.patch.object(self.app, 'sort_nodes',
lambda n, *args, **kwargs: n), \
mock.patch.object(self.app, 'request_node_count',
lambda r: 1000000):
got_nodes = list(proxy_base.NodeIter(
self.app, object_ring, 0, self.logger, req,
node_iter=iter(node_list)))
self.assertEqual(expected, got_nodes)
def test_iter_nodes_with_replication_network(self):
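        # x-backend-use-replication-network toggles use_replication on nodes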
object_ring = self.app.get_object_ring(None)
node_list = [dict(id=n, ip='1.2.3.4', port=n, device='D',
use_replication=False)
for n in range(10)]
req = Request.blank(
'/v1/a/c', headers={'x-backend-use-replication-network': 'true'})
with mock.patch.object(self.app, 'sort_nodes',
lambda n, *args, **kwargs: n), \
mock.patch.object(self.app, 'request_node_count',
lambda r: 3):
got_nodes = list(proxy_base.NodeIter(
self.app, object_ring, 0, self.logger, req,
node_iter=iter(node_list)))
expected = [dict(n, use_replication=True) for n in node_list]
self.assertEqual(expected[:3], got_nodes)
req = Request.blank(
'/v1/a/c', headers={'x-backend-use-replication-network': 'false'})
expected = [dict(n, use_replication=False) for n in node_list]
with mock.patch.object(self.app, 'sort_nodes',
lambda n, *args, **kwargs: n), \
mock.patch.object(self.app, 'request_node_count',
lambda r: 13):
got_nodes = list(proxy_base.NodeIter(
self.app, object_ring, 0, self.logger, req,
node_iter=iter(node_list)))
self.assertEqual(expected, got_nodes)
def test_best_response_sets_headers(self):
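        # best_response carries headers from the winning backend response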
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [b''] * 3,
'Object', headers=[{'X-Test': '1'},
{'X-Test': '2'},
{'X-Test': '3'}])
self.assertEqual(resp.headers['X-Test'], '1')
def test_best_response_sets_etag(self):
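        # best_response only sets an etag when one is explicitly passed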
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [b''] * 3,
'Object')
self.assertIsNone(resp.etag)
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [b''] * 3,
'Object',
etag='68b329da9893e34099c7d8ad5cb9c940'
)
self.assertEqual(resp.etag, '68b329da9893e34099c7d8ad5cb9c940')
def test_proxy_passes_content_type(self):
with save_globals():
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
set_http_connect(200, 200, 200)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'x-application/test')
set_http_connect(200, 200, 200)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 0)
set_http_connect(200, 200, 200, slow=True)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 4)
def test_proxy_passes_content_length_on_head(self):
with save_globals():
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 0)
set_http_connect(200, 200, 200, slow=True)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 4)
def test_error_limiting(self):
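        # enough 503s error-limit a node; requests 503 until the limit expires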
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l, *args, **kwargs: l
object_ring = controller.app.get_object_ring(None)
self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
200)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]), 2)
self.assertTrue(
node_last_error(controller.app, object_ring.devs[0])
is not None)
for _junk in range(self.app.error_limiter.suppression_limit):
self.assert_status_map(controller.HEAD, (200, 200, 503, 503,
503), 503)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]),
self.app.error_limiter.suppression_limit + 1)
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
503)
self.assertTrue(
node_last_error(controller.app, object_ring.devs[0])
is not None)
self.assert_status_map(controller.PUT, (200, 200, 200, 201, 201,
201), 503)
self.assert_status_map(controller.POST,
(200, 200, 200, 200, 200, 200, 202, 202,
202), 503)
self.assert_status_map(controller.DELETE,
(200, 200, 200, 204, 204, 204), 503)
self.app.error_limiter.suppression_interval = -300
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
200)
self.assertRaises(BaseException,
self.assert_status_map, controller.DELETE,
(200, 200, 200, 204, 204, 204), 503,
raise_exc=True)
def test_error_limiting_survives_ring_reload(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l, *args, **kwargs: l
object_ring = controller.app.get_object_ring(None)
self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
200)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]), 2)
self.assertTrue(
node_last_error(controller.app, object_ring.devs[0])
is not None)
for _junk in range(self.app.error_limiter.suppression_limit):
self.assert_status_map(controller.HEAD, (200, 200, 503, 503,
503), 503)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]),
self.app.error_limiter.suppression_limit + 1)
# wipe out any state in the ring
for policy in POLICIES:
policy.object_ring = FakeRing(base_port=3000)
# and we still get an error, which proves that the
# error-limiting info survived a ring reload
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
503)
def test_PUT_error_limiting(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l, *args, **kwargs: l
object_ring = controller.app.get_object_ring(None)
# acc con obj obj obj
self.assert_status_map(controller.PUT, (200, 200, 503, 200, 200),
200)
# 2, not 1, because assert_status_map() calls the method twice
odevs = object_ring.devs
self.assertEqual(node_error_count(controller.app, odevs[0]), 2)
self.assertEqual(node_error_count(controller.app, odevs[1]), 0)
self.assertEqual(node_error_count(controller.app, odevs[2]), 0)
self.assertIsNotNone(node_last_error(controller.app, odevs[0]))
self.assertIsNone(node_last_error(controller.app, odevs[1]))
self.assertIsNone(node_last_error(controller.app, odevs[2]))
def test_PUT_error_limiting_last_node(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l, *args, **kwargs: l
object_ring = controller.app.get_object_ring(None)
# acc con obj obj obj
self.assert_status_map(controller.PUT, (200, 200, 200, 200, 503),
200)
# 2, not 1, because assert_status_map() calls the method twice
odevs = object_ring.devs
self.assertEqual(node_error_count(controller.app, odevs[0]), 0)
self.assertEqual(node_error_count(controller.app, odevs[1]), 0)
self.assertEqual(node_error_count(controller.app, odevs[2]), 2)
self.assertIsNone(node_last_error(controller.app, odevs[0]))
self.assertIsNone(node_last_error(controller.app, odevs[1]))
self.assertIsNotNone(node_last_error(controller.app, odevs[2]))
def test_acc_or_con_missing_returns_404(self):
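        # missing or error-limited account/container makes object requests 404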
with save_globals():
self.app.error_limiter.stats.clear()
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
self.app.update_request(req)
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 200)
set_http_connect(404, 404, 404)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 404, 404)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 503, 404)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 503, 503)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 200, 204, 204, 204)
# acct cont obj obj obj
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 204)
set_http_connect(200, 404, 404, 404)
# acct cont cont cont
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 503, 503, 503)
# acct cont cont cont
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
for dev in self.app.account_ring.devs:
set_node_errors(
self.app, dev,
self.app.error_limiter.suppression_limit + 1,
time.time())
set_http_connect(200)
# acct [isn't actually called since everything
# is error limited]
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
for dev in self.app.account_ring.devs:
set_node_errors(self.app, dev, 0, last_error=None)
for dev in self.app.container_ring.devs:
set_node_errors(
self.app, dev,
self.app.error_limiter.suppression_limit + 1,
time.time())
set_http_connect(200, 200)
# acct cont [isn't actually called since
# everything is error limited]
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
def test_PUT_POST_requires_container_exist(self):
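        # PUT and POST should 404 when the container lookup fails even
        # though the account exists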
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 404, 404, 404, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 404, 404, 404, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'text/plain'})
self.app.update_request(req)
resp = controller.POST(req)
self.assertEqual(resp.status_int, 404)
def test_PUT_object_to_container_does_not_exist(self):
self.app.container_ring.max_more_nodes = 3 # that's 3 handoffs
# no container found anywhere!
req = Request.blank('/v1/a/c/o', method='PUT')
with mocked_http_conn(*([200] + [404] * 6)) as fake_conn:
resp = req.get_response(self.app)
# object create returns error
self.assertEqual(resp.status_int, 404)
self.assertEqual(['HEAD'] * 7,
[r['method'] for r in fake_conn.requests])
self.assertEqual(['/a'] + ['/a/c'] * 6, [
r['path'][len('/sdX/0'):] for r in fake_conn.requests])
def test_PUT_object_to_container_exist_on_handoff(self):
self.app.container_ring.max_more_nodes = 3 # that's 3 handoffs
# finally get info after three requests
req = Request.blank('/v1/a/c/o', method='PUT', content_length=0)
account_status = [200]
container_status = ([404] * 5) + [200]
object_status = [201, 201, 201]
status = account_status + container_status + object_status
with mocked_http_conn(*status) as fake_conn:
resp = req.get_response(self.app)
# object created
self.assertEqual(resp.status_int, 201)
account_requests = fake_conn.requests[:len(account_status)]
self.assertEqual(['HEAD'],
[r['method'] for r in account_requests])
self.assertEqual(['/a'], [
r['path'][len('/sdX/0'):] for r in account_requests])
container_requests = fake_conn.requests[
len(account_status):len(account_status) + len(container_status)]
self.assertEqual(['HEAD'] * 6,
[r['method'] for r in container_requests])
self.assertEqual(['/a/c'] * 6, [
r['path'][len('/sdX/0'):] for r in container_requests])
obj_requests = fake_conn.requests[
len(account_status) + len(container_status):]
self.assertEqual(['PUT'] * 3,
[r['method'] for r in obj_requests])
self.assertEqual(['/a/c/o'] * 3, [
r['path'][len('/sdX/0'):] for r in obj_requests])
def test_PUT_object_to_primary_timeout_container_exist(self):
self.app.container_ring.max_more_nodes = 3 # that's 3 handoffs
req = Request.blank('/v1/a/c/o', method='PUT', content_length=0)
account_status = [200]
# no response from primaries but container exists on a handoff!
container_status = ([Timeout()] * 3) + [200]
object_status = [201, 201, 201]
status = account_status + container_status + object_status
with mocked_http_conn(*status) as fake_conn:
resp = req.get_response(self.app)
# object created
self.assertEqual(resp.status_int, 201)
account_requests = fake_conn.requests[:len(account_status)]
self.assertEqual(['HEAD'],
[r['method'] for r in account_requests])
self.assertEqual(['/a'], [
r['path'][len('/sdX/0'):] for r in account_requests])
container_requests = fake_conn.requests[
len(account_status):len(account_status) + len(container_status)]
self.assertEqual(['HEAD'] * 4,
[r['method'] for r in container_requests])
self.assertEqual(['/a/c'] * 4, [
r['path'][len('/sdX/0'):] for r in container_requests])
obj_requests = fake_conn.requests[
len(account_status) + len(container_status):]
self.assertEqual(['PUT'] * 3,
[r['method'] for r in obj_requests])
self.assertEqual(['/a/c/o'] * 3, [
r['path'][len('/sdX/0'):] for r in obj_requests])
def test_PUT_object_to_all_containers_error(self):
self.app.container_ring.max_more_nodes = 2 # 2 handoffs
req = Request.blank('/v1/a/c/o', method='PUT', content_length=0)
account_status = [200]
container_status = [503] * 5 # 3 replicas + 2 handoffs
status = account_status + container_status
with mocked_http_conn(*status) as fake_conn:
resp = req.get_response(self.app)
account_requests = fake_conn.requests[:len(account_status)]
self.assertEqual(['HEAD'],
[r['method'] for r in account_requests])
self.assertEqual(['/a'], [
r['path'][len('/sdX/0'):] for r in account_requests])
container_requests = fake_conn.requests[
len(account_status):len(account_status) + len(container_status)]
self.assertEqual(['HEAD'] * 5,
[r['method'] for r in container_requests])
self.assertEqual(['/a/c'] * 5, [
r['path'][len('/sdX/0'):] for r in container_requests])
# object is not created!
self.assertEqual(resp.status_int, 503)
def test_PUT_object_to_primary_containers_timeout(self):
self.app.container_ring.max_more_nodes = 2 # 2 handoffs
req = Request.blank('/v1/a/c/o', method='PUT', content_length=0)
account_status = [200]
# primary timeout db lock & handoffs 404
container_status = [Timeout()] * 3 + [404] * 2
status = account_status + container_status
with mocked_http_conn(*status) as fake_conn:
resp = req.get_response(self.app)
account_requests = fake_conn.requests[:len(account_status)]
self.assertEqual(['HEAD'],
[r['method'] for r in account_requests])
self.assertEqual(['/a'], [
r['path'][len('/sdX/0'):] for r in account_requests])
container_requests = fake_conn.requests[
len(account_status):len(account_status) + len(container_status)]
self.assertEqual(['HEAD'] * 5,
[r['method'] for r in container_requests])
self.assertEqual(['/a/c'] * 5, [
r['path'][len('/sdX/0'):] for r in container_requests])
# object is not created!
self.assertEqual(resp.status_int, 503)
def test_bad_metadata(self):
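        # exercise the object metadata constraints: name length, value
        # length, per-object count and overall size; requests at each
        # limit succeed (201) and requests just over a limit fail (400)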
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
cache = FakeMemcache()
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache},
headers={'Content-Length': '0'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache},
headers={'Content-Length': '0',
'X-Object-Meta-' + (
'a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache},
headers={
'Content-Length': '0',
'X-Object-Meta-' + (
'a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache},
headers={'Content-Length': '0',
'X-Object-Meta-Too-Long': 'a' *
constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache},
headers={'Content-Length': '0',
'X-Object-Meta-Too-Long': 'a' *
(constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
for x in range(constraints.MAX_META_COUNT):
headers['X-Object-Meta-%d' % x] = 'v'
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
for x in range(constraints.MAX_META_COUNT + 1):
headers['X-Object-Meta-%d' % x] = 'v'
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < constraints.MAX_META_OVERALL_SIZE - 4 - \
constraints.MAX_META_VALUE_LENGTH:
size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Object-Meta-%04d' % x] = header_value
x += 1
if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Object-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Object-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
@contextmanager
def controller_context(self, req, *args, **kwargs):
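        # yield a ReplicatedObjectController backed by the given mocked
        # responses; fail the test if any mocked status codes go unused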
_v, account, container, obj = utils.split_path(req.path, 4, 4, True)
controller = ReplicatedObjectController(
self.app, account, container, obj)
self.app.update_request(req)
with save_globals():
new_connect = set_http_connect(*args, **kwargs)
yield controller
unused_status_list = []
while True:
try:
unused_status_list.append(next(new_connect.code_iter))
except StopIteration:
break
if unused_status_list:
                self.fail('UN-USED STATUS CODES: %r' %
                          unused_status_list)
@unpatch_policies
def test_chunked_put_bad_version(self):
# Check bad version
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v0 HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_path(self):
# Check bad path
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET invalid HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_utf8(self):
# Check invalid utf-8
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a%80 HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Auth-Token: t\r\n'
b'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_path_no_controller(self):
# Check bad path, no controller
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1 HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Auth-Token: t\r\n'
b'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_method(self):
# Check bad method
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'LICK /v1/a HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Auth-Token: t\r\n'
b'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 405'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_unhandled_exception(self):
# Check unhandled exception
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
obj2srv, obj3srv, obj4srv, obj5srv, obj6srv) = _test_servers
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
orig_update_request = prosrv.update_request
def broken_update_request(*args, **kwargs):
raise Exception('fake: this should be printed')
prosrv.update_request = broken_update_request
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Auth-Token: t\r\n'
b'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 500'
self.assertEqual(headers[:len(exp)], exp)
prosrv.update_request = orig_update_request
@unpatch_policies
def test_chunked_put_head_account(self):
        # HEAD account: just a double check, and really here to test the
        # part of Application.log_request that 'enforces' a
        # content_length on the response.
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Auth-Token: t\r\n'
b'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 204'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn(b'\r\nContent-Length: 0\r\n', headers)
@unpatch_policies
def test_chunked_put_utf8_all_the_way_down(self):
# Test UTF-8 Unicode all the way through the system
ustr = b'\xe1\xbc\xb8\xce\xbf\xe1\xbd\xba \xe1\xbc\xb0\xce' \
b'\xbf\xe1\xbd\xbb\xce\x87 \xcf\x84\xe1\xbd\xb0 \xcf' \
b'\x80\xe1\xbd\xb1\xce\xbd\xcf\x84\xca\xbc \xe1\xbc' \
b'\x82\xce\xbd \xe1\xbc\x90\xce\xbe\xe1\xbd\xb5\xce' \
b'\xba\xce\xbf\xce\xb9 \xcf\x83\xce\xb1\xcf\x86\xe1' \
b'\xbf\x86.Test'
ustr_short = b'\xe1\xbc\xb8\xce\xbf\xe1\xbd\xbatest'
# Create ustr container
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Storage-Token: t\r\n'
b'Content-Length: 0\r\n\r\n' % quote(ustr).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# List account with ustr container (test plain)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Storage-Token: t\r\n'
b'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
containers = fd.read().split(b'\n')
self.assertIn(ustr, containers)
# List account with ustr container (test json)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a?format=json HTTP/1.1\r\n'
b'Host: localhost\r\nConnection: close\r\n'
b'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
listing = json.loads(fd.read())
self.assertIn(ustr.decode('utf8'), [l['name'] for l in listing])
# List account with ustr container (test xml)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a?format=xml HTTP/1.1\r\n'
b'Host: localhost\r\nConnection: close\r\n'
b'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn(b'<name>%s</name>' % ustr, fd.read())
# Create ustr object with ustr metadata in ustr container
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Storage-Token: t\r\n'
b'X-Object-Meta-%s: %s\r\nContent-Length: 0\r\n\r\n' %
(quote(ustr).encode('ascii'), quote(ustr).encode('ascii'),
quote(ustr_short).encode('ascii'),
quote(ustr).encode('ascii')))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# List ustr container with ustr object (test plain)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Storage-Token: t\r\n'
b'Content-Length: 0\r\n\r\n' % quote(ustr).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
objects = fd.read().split(b'\n')
self.assertIn(ustr, objects)
# List ustr container with ustr object (test json)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/%s?format=json HTTP/1.1\r\n'
b'Host: localhost\r\nConnection: close\r\n'
b'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n' %
quote(ustr).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
listing = json.loads(fd.read())
self.assertEqual(listing[0]['name'], ustr.decode('utf8'))
# List ustr container with ustr object (test xml)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/%s?format=xml HTTP/1.1\r\n'
b'Host: localhost\r\nConnection: close\r\n'
b'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n' %
quote(ustr).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn(b'<name>%s</name>' % ustr, fd.read())
# Retrieve ustr object with ustr metadata
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Storage-Token: t\r\n'
b'Content-Length: 0\r\n\r\n' %
(quote(ustr).encode('ascii'), quote(ustr).encode('ascii')))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn(b'\r\nX-Object-Meta-%s: %s\r\n' %
(quote(ustr_short).title().encode('ascii'),
quote(ustr).encode('ascii')), headers)
@unpatch_policies
def test_chunked_put_chunked_put(self):
# Do chunked object put
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
# Also happens to assert that x-storage-token is taken as a
# replacement for x-auth-token.
fd.write(b'PUT /v1/a/c/o/chunky HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Storage-Token: t\r\n'
b'Transfer-Encoding: chunked\r\n\r\n'
b'2\r\noh\r\n4\r\n hai\r\nf\r\n123456789abcdef\r\n'
b'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Ensure we get what we put
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/c/o/chunky HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Auth-Token: t\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
self.assertEqual(body, b'oh hai123456789abcdef')
@unpatch_policies
def test_conditional_range_get(self):
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis,
obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# make a container
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/con HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Storage-Token: t\r\n'
b'Content-Length: 0\r\n\r\n')
fd.flush()
exp = b'HTTP/1.1 201'
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
# put an object in it
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/con/o HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: close\r\n'
b'X-Storage-Token: t\r\n'
b'Content-Length: 10\r\n'
b'Content-Type: text/plain\r\n'
b'\r\n'
b'abcdefghij\r\n')
fd.flush()
exp = b'HTTP/1.1 201'
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
# request with both If-None-Match and Range
etag = md5(b"abcdefghij",
usedforsecurity=False).hexdigest().encode('ascii')
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/con/o HTTP/1.1\r\n' +
b'Host: localhost\r\n' +
b'Connection: close\r\n' +
b'X-Storage-Token: t\r\n' +
b'If-None-Match: "' + etag + b'"\r\n' +
b'Range: bytes=3-8\r\n' +
b'\r\n')
fd.flush()
exp = b'HTTP/1.1 304'
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
def test_mismatched_etags(self):
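        # differing or mismatched backend etags on PUT must surface as
        # errors rather than a blind 201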
with save_globals():
# no etag supplied, object servers return success w/ diff values
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0'})
self.app.update_request(req)
set_http_connect(200, 201, 201, 201,
etags=[None,
'68b329da9893e34099c7d8ad5cb9c940',
'68b329da9893e34099c7d8ad5cb9c940',
'68b329da9893e34099c7d8ad5cb9c941'])
resp = controller.PUT(req)
self.assertEqual(resp.status_int // 100, 5) # server error
# req supplies etag, object servers return 422 - mismatch
headers = {'Content-Length': '0',
'ETag': '68b329da9893e34099c7d8ad5cb9c940'}
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
set_http_connect(200, 422, 422, 503,
etags=['68b329da9893e34099c7d8ad5cb9c940',
'68b329da9893e34099c7d8ad5cb9c941',
None,
None])
resp = controller.PUT(req)
self.assertEqual(resp.status_int // 100, 4) # client error
def test_response_get_accept_ranges_header(self):
with save_globals():
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.GET(req)
self.assertIn('accept-ranges', resp.headers)
self.assertEqual(resp.headers['accept-ranges'], 'bytes')
def test_response_head_accept_ranges_header(self):
with save_globals():
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assertIn('accept-ranges', resp.headers)
self.assertEqual(resp.headers['accept-ranges'], 'bytes')
def test_GET_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.GET(req)
self.assertTrue(called[0])
def _check_GET_respects_read_affinity(self, conf, policy, expected_nodes):
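        # run a GET with the given proxy conf and policy and assert the
        # object nodes are contacted in expected_nodes order; shuffle is
        # mocked out so only affinity sorting can reorder the nodes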
actual_nodes = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
actual_nodes.append(ipaddr)
# mock shuffle to be a no-op to ensure that the only way nodes would
# not be used in ring order is if affinity is respected.
with mock.patch('swift.proxy.server.shuffle', lambda x: x):
app = proxy_server.Application(
conf,
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(),
container_ring=FakeRing())
with save_globals():
object_ring = app.get_object_ring(policy)
object_ring.max_more_nodes = 100
controller = \
ReplicatedObjectController(
app, 'a', 'c', 'o.jpg')
# requests go to acc, con, obj, obj, obj
set_http_connect(200, 200, 404, 404, 200,
give_connect=test_connect)
req = Request.blank(
'/v1/a/c/o.jpg',
headers={'X-Backend-Storage-Policy-Index': str(policy)})
res = controller.GET(req)
self.assertTrue(res.status.startswith('200 '))
self.assertEqual(3, len(actual_nodes))
self.assertEqual(expected_nodes, actual_nodes)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing()),
StoragePolicy(1, 'one', False, object_ring=FakeRing())])
def test_GET_respects_read_affinity(self):
# nodes in fake ring order have r0z0, r1z1, r0z2
# Check default conf via proxy server conf
conf = {'read_affinity': 'r0z2=1, r1=2',
'sorting_method': 'affinity'}
expected_nodes = ['10.0.0.2', '10.0.0.1', '10.0.0.0']
self._check_GET_respects_read_affinity(conf, 0, expected_nodes)
# policy 0 and policy 1 have conf via per policy conf section
conf = {
'read_affinity': '',
'sorting_method': 'shuffle',
'policy_config': {
'0': {'read_affinity': 'r1z1=1, r0z2=2',
'sorting_method': 'affinity'},
'1': {'read_affinity': 'r0z2=1, r0z0=2',
'sorting_method': 'affinity'}
}
}
expected_nodes = ['10.0.0.1', '10.0.0.2', '10.0.0.0']
self._check_GET_respects_read_affinity(conf, 0, expected_nodes)
expected_nodes = ['10.0.0.2', '10.0.0.0', '10.0.0.1']
self._check_GET_respects_read_affinity(conf, 1, expected_nodes)
# policy 0 conf via per policy conf section overrides proxy server conf
conf = {
'read_affinity': 'r1z1=1, r0z2=2',
'sorting_method': 'affinity',
'policy_config': {
'0': {'read_affinity': 'r0z2=1, r0=2',
'sorting_method': 'affinity'}
}
}
expected_nodes = ['10.0.0.2', '10.0.0.0', '10.0.0.1']
self._check_GET_respects_read_affinity(conf, 0, expected_nodes)
def test_HEAD_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.HEAD(req)
self.assertTrue(called[0])
def test_POST_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
def test_PUT_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.PUT(req)
self.assertTrue(called[0])
def test_POST_converts_delete_after_to_delete_at(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
orig_time = time.time
try:
t = time.time()
time.time = lambda: t
req = Request.blank('/v1/a/c/o', {},
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '60'})
self.app.update_request(req)
res = controller.POST(req)
self.assertEqual(res.status, '202 Fake')
self.assertEqual(req.headers.get('x-delete-at'),
str(int(t + 60)))
finally:
time.time = orig_time
@unpatch_policies
def test_leak_1(self):
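        # make sure Request instances aren't leaked when a client
        # disconnects partway through reading a GET response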
_request_instances = weakref.WeakKeyDictionary()
_orig_init = Request.__init__
def request_init(self, *args, **kwargs):
_orig_init(self, *args, **kwargs)
_request_instances[self] = None
with mock.patch.object(Request, "__init__", request_init):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
obj_len = prosrv.client_chunk_size * 2
# PUT test file
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/c/test_leak_1 HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: close\r\n'
b'X-Auth-Token: t\r\n'
b'Content-Length: %d\r\n'
b'Content-Type: application/octet-stream\r\n'
b'\r\n%s' % (obj_len, b'a' * obj_len))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Remember Request instance count, make sure the GC is run for
# pythons without reference counting.
for i in range(4):
sleep(0) # let eventlet do its thing
gc.collect()
else:
sleep(0)
before_request_instances = len(_request_instances)
# GET test file, but disconnect early
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/c/test_leak_1 HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: close\r\n'
b'X-Auth-Token: t\r\n'
b'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
fd.read(1)
if six.PY2:
sock.fd._sock.close()
else:
sock.fd._real_close()
# Make sure the GC is run again for pythons without reference
# counting
for i in range(4):
sleep(0) # let eventlet do its thing
gc.collect()
else:
sleep(0)
self.assertEqual(
before_request_instances, len(_request_instances))
def test_OPTIONS(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
def my_empty_container_info(*args):
return {}
controller.container_info = my_empty_container_info
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
self.assertNotIn('Access-Control-Allow-Origin', resp.headers)
self.assertNotIn('Vary', resp.headers)
def my_empty_origin_container_info(*args):
return {'cors': {'allow_origin': None}}
controller.container_info = my_empty_origin_container_info
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
self.assertNotIn('Access-Control-Allow-Origin', resp.headers)
self.assertNotIn('Vary', resp.headers)
def my_container_info(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar:8080 https://foo.bar',
'max_age': '999',
}
}
controller.container_info = my_container_info
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
'https://foo.bar',
resp.headers['access-control-allow-origin'])
self.assertEqual('Origin', resp.headers.get('vary'))
self.assertEqual(
sorted(resp.headers['access-control-allow-methods']
.split(', ')),
sorted('OPTIONS GET POST PUT DELETE HEAD'.split()))
self.assertEqual('999', resp.headers['access-control-max-age'])
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
self.assertNotIn('Access-Control-Allow-Origin', resp.headers)
self.assertNotIn('Vary', resp.headers)
req = Request.blank('/v1/a/c/o.jpg', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
sorted(resp.headers['Allow'].split(', ')),
sorted('OPTIONS GET POST PUT DELETE HEAD'.split()))
self.assertNotIn('Access-Control-Allow-Origin', resp.headers)
self.assertNotIn('Vary', resp.headers)
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
self.assertNotIn('Access-Control-Allow-Origin', resp.headers)
self.assertNotIn('Vary', resp.headers)
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.bar',
'Access-Control-Request-Method': 'GET'})
controller.app.cors_allow_origin = ['http://foo.bar', ]
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual('Origin', resp.headers.get('vary'))
def my_container_info_wildcard(*args):
return {
'cors': {
'allow_origin': '*',
'max_age': '999',
}
}
controller.container_info = my_container_info_wildcard
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://bar.baz',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual('*', resp.headers['access-control-allow-origin'])
self.assertNotIn('Vary', resp.headers)
self.assertEqual(
sorted(resp.headers['access-control-allow-methods']
.split(', ')),
sorted('OPTIONS GET POST PUT DELETE HEAD'.split()))
self.assertEqual('999', resp.headers['access-control-max-age'])
def _get_CORS_response(self, container_cors, strict_mode, object_get=None):
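        # run an object GET through the cors_validation decorator with
        # the given container CORS settings and strict-mode flag, and
        # return the resulting response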
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
def stubContainerInfo(*args):
return {
'cors': container_cors
}
controller.container_info = stubContainerInfo
controller.app.strict_cors_mode = strict_mode
def objectGET(controller, req):
return Response(headers={
'X-Object-Meta-Color': 'red',
'X-Super-Secret': 'hush',
})
mock_object_get = object_get or objectGET
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(mock_object_get)(controller, req)
return resp
def test_CORS_valid_non_strict(self):
# test expose_headers to non-allowed origins
container_cors = {'allow_origin': 'http://not.foo.bar',
'expose_headers': 'X-Object-Meta-Color '
'X-Object-Meta-Color-Ex'}
resp = self._get_CORS_response(
container_cors=container_cors, strict_mode=False)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://foo.bar',
resp.headers['access-control-allow-origin'])
self.assertEqual('Origin', resp.headers['vary'])
self.assertEqual('red', resp.headers['x-object-meta-color'])
# X-Super-Secret is in the response, but not "exposed"
self.assertEqual('hush', resp.headers['x-super-secret'])
self.assertIn('access-control-expose-headers', resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
expected_exposed = set([
'cache-control', 'content-language', 'content-type', 'expires',
'last-modified', 'pragma', 'etag', 'x-timestamp', 'x-trans-id',
'x-openstack-request-id', 'x-object-meta-color',
'x-object-meta-color-ex'])
self.assertEqual(expected_exposed, exposed)
# test allow_origin *
container_cors = {'allow_origin': '*'}
resp = self._get_CORS_response(
container_cors=container_cors, strict_mode=False)
self.assertEqual(200, resp.status_int)
self.assertEqual('*',
resp.headers['access-control-allow-origin'])
self.assertNotIn('vary', resp.headers)
# test allow_origin empty
container_cors = {'allow_origin': ''}
resp = self._get_CORS_response(
container_cors=container_cors, strict_mode=False)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://foo.bar',
resp.headers['access-control-allow-origin'])
self.assertEqual('Origin', resp.headers['vary'])
def test_CORS_valid_strict(self):
# test expose_headers to non-allowed origins
container_cors = {'allow_origin': 'http://not.foo.bar',
'expose_headers': 'X-Object-Meta-Color '
'X-Object-Meta-Color-Ex'}
resp = self._get_CORS_response(
container_cors=container_cors, strict_mode=True)
self.assertEqual(200, resp.status_int)
self.assertNotIn('access-control-expose-headers', resp.headers)
self.assertNotIn('access-control-allow-origin', resp.headers)
# test allow_origin *
container_cors = {'allow_origin': '*'}
resp = self._get_CORS_response(
container_cors=container_cors, strict_mode=True)
self.assertEqual(200, resp.status_int)
self.assertEqual('*',
resp.headers['access-control-allow-origin'])
self.assertNotIn('vary', resp.headers)
self.assertEqual('red', resp.headers['x-object-meta-color'])
# X-Super-Secret is in the response, but not "exposed"
self.assertEqual('hush', resp.headers['x-super-secret'])
self.assertIn('access-control-expose-headers', resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
expected_exposed = set([
'cache-control', 'content-language', 'content-type', 'expires',
'last-modified', 'pragma', 'etag', 'x-timestamp', 'x-trans-id',
'x-openstack-request-id', 'x-object-meta-color'])
self.assertEqual(expected_exposed, exposed)
# test allow_origin empty
container_cors = {'allow_origin': ''}
resp = self._get_CORS_response(
container_cors=container_cors, strict_mode=True)
self.assertNotIn('access-control-expose-headers', resp.headers)
self.assertNotIn('access-control-allow-origin', resp.headers)
self.assertNotIn('vary', resp.headers)
# test proxy server cors_allow_origin option
self.app.cors_allow_origin = ['http://foo.bar']
resp = self._get_CORS_response(
container_cors=container_cors, strict_mode=True)
self.assertEqual('http://foo.bar',
resp.headers['access-control-allow-origin'])
self.assertEqual('Origin', resp.headers['vary'])
self.assertEqual(expected_exposed, exposed)
def test_CORS_valid_with_obj_headers(self):
container_cors = {'allow_origin': 'http://foo.bar'}
def objectGET(controller, req):
return Response(headers={
'X-Object-Meta-Color': 'red',
'X-Super-Secret': 'hush',
'Access-Control-Allow-Origin': 'http://obj.origin',
'Access-Control-Expose-Headers': 'x-trans-id'
})
resp = self._get_CORS_response(
container_cors=container_cors, strict_mode=True,
object_get=objectGET)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://obj.origin',
resp.headers['access-control-allow-origin'])
self.assertEqual('x-trans-id',
resp.headers['access-control-expose-headers'])
def test_CORS_expose_headers(self):
default_expected_exposed = set([
'cache-control', 'content-language', 'content-type', 'expires',
'last-modified', 'pragma', 'etag', 'x-timestamp', 'x-trans-id',
'x-openstack-request-id'])
def objectGET(controller, req):
return Response(headers={
'X-Custom-Operator': 'hush',
'X-Custom-User': 'hush',
})
# test default expose_headers
self.app.cors_expose_headers = []
container_cors = {'allow_origin': 'http://foo.bar'}
resp = self._get_CORS_response(container_cors=container_cors,
strict_mode=False, object_get=objectGET)
self.assertEqual(200, resp.status_int)
self.assertIn('access-control-expose-headers', resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
self.assertEqual(default_expected_exposed, exposed)
# test operator expose_headers
self.app.cors_expose_headers = ['x-custom-operator', ]
container_cors = {'allow_origin': 'http://foo.bar'}
resp = self._get_CORS_response(container_cors=container_cors,
strict_mode=False, object_get=objectGET)
self.assertEqual(200, resp.status_int)
self.assertIn('access-control-expose-headers', resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
self.assertEqual(default_expected_exposed | set(['x-custom-operator']),
exposed)
# test user expose_headers
self.app.cors_expose_headers = []
container_cors = {'allow_origin': 'http://foo.bar',
'expose_headers': 'x-custom-user'}
resp = self._get_CORS_response(container_cors=container_cors,
strict_mode=False, object_get=objectGET)
self.assertEqual(200, resp.status_int)
self.assertIn('access-control-expose-headers', resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
self.assertEqual(default_expected_exposed | set(['x-custom-user']),
exposed)
# test user and operator expose_headers
self.app.cors_expose_headers = ['x-custom-operator', ]
container_cors = {'allow_origin': 'http://foo.bar',
'expose_headers': 'x-custom-user'}
resp = self._get_CORS_response(container_cors=container_cors,
strict_mode=False, object_get=objectGET)
self.assertEqual(200, resp.status_int)
self.assertIn('access-control-expose-headers', resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
self.assertEqual(default_expected_exposed | set(['x-custom-user',
'x-custom-operator']),
exposed)
def _gather_x_container_headers(self, controller_call, req, *connect_args,
**kwargs):
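        # drive controller_call against mocked backends and return the
        # backend headers named in header_list (X-Container-* by default)
        # that were sent with each object-server request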
header_list = kwargs.pop('header_list', ['X-Container-Device',
'X-Container-Host',
'X-Container-Partition'])
seen_headers = []
def capture_headers(ipaddr, port, device, partition, method,
path, headers=None, query_string=None):
captured = {}
for header in header_list:
captured[header] = headers.get(header)
seen_headers.append(captured)
with save_globals():
self.app.allow_account_management = True
set_http_connect(*connect_args, give_connect=capture_headers,
**kwargs)
resp = controller_call(req)
self.assertEqual(2, resp.status_int // 100) # sanity check
# don't care about the account/container HEADs, so chuck
# the first two requests
return sorted(seen_headers[2:],
key=lambda d: d.get(header_list[0]) or 'z')
def test_PUT_x_container_headers_with_equal_replicas(self):
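        # with matching object and container replica counts, each object
        # server PUT should name exactly one container node in its
        # X-Container-* headers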
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000',
'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_PUT_x_container_headers_with_fewer_container_replicas(self):
self.app.container_ring.set_replicas(2)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000',
'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': None,
'X-Container-Partition': None,
'X-Container-Device': None}])
def test_PUT_x_container_headers_with_many_object_replicas(self):
POLICIES[0].object_ring.set_replicas(11)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
# HEAD HEAD PUT PUT PUT PUT PUT PUT PUT PUT PUT PUT PUT
200, 200, 201, 201, 201, 201, 201, 201, 201, 201, 201, 201, 201)
self.assertEqual(
dict(collections.Counter(tuple(sorted(h.items()))
for h in seen_headers)),
{(('X-Container-Device', 'sda'),
('X-Container-Host', '10.0.0.0:1000'),
('X-Container-Partition', '0')): 3,
(('X-Container-Device', 'sdb'),
('X-Container-Host', '10.0.0.1:1001'),
('X-Container-Partition', '0')): 2,
(('X-Container-Device', 'sdc'),
('X-Container-Host', '10.0.0.2:1002'),
('X-Container-Partition', '0')): 2,
(('X-Container-Device', None),
('X-Container-Host', None),
('X-Container-Partition', None)): 4})
def test_PUT_x_container_headers_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_POST_x_container_headers_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'application/stuff'})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.POST, req,
200, 200, 200, 200, 200) # HEAD HEAD POST POST POST
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_DELETE_x_container_headers_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Content-Type': 'application/stuff'})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.DELETE, req,
200, 200, 200, 200, 200) # HEAD HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}
])
@mock.patch('time.time', new=lambda: STATIC_TIME)
def test_PUT_x_delete_at_with_fewer_container_replicas(self):
self.app.container_ring.set_replicas(2)
delete_at_timestamp = int(time.time()) + 100000
delete_at_container = utils.get_expirer_container(
delete_at_timestamp, self.app.expiring_objects_container_divisor,
'a', 'c', 'o')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Type': 'application/stuff',
'Content-Length': '0',
'X-Delete-At': str(delete_at_timestamp)})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
header_list=('X-Delete-At-Host', 'X-Delete-At-Device',
'X-Delete-At-Partition', 'X-Delete-At-Container'))
self.assertEqual(seen_headers, [
{'X-Delete-At-Host': '10.0.0.0:1000',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sda'},
{'X-Delete-At-Host': '10.0.0.1:1001',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdb'},
{'X-Delete-At-Host': None,
'X-Delete-At-Partition': None,
'X-Delete-At-Container': None,
'X-Delete-At-Device': None},
])
@mock.patch('time.time', new=lambda: STATIC_TIME)
def test_PUT_x_delete_at_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
self.app.expiring_objects_account = 'expires'
self.app.expiring_objects_container_divisor = 60
delete_at_timestamp = int(time.time()) + 100000
delete_at_container = utils.get_expirer_container(
delete_at_timestamp, self.app.expiring_objects_container_divisor,
'a', 'c', 'o')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Type': 'application/stuff',
'Content-Length': 0,
'X-Delete-At': str(delete_at_timestamp)})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
header_list=('X-Delete-At-Host', 'X-Delete-At-Device',
'X-Delete-At-Partition', 'X-Delete-At-Container'))
self.assertEqual(seen_headers, [
{'X-Delete-At-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sda,sdd'},
{'X-Delete-At-Host': '10.0.0.1:1001',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdb'},
{'X-Delete-At-Host': '10.0.0.2:1002',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdc'}
])
@contextmanager
def in_process_proxy(prosrv, **extra_server_kwargs):
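    # run the given proxy app in a greenthread on an ephemeral port and
    # yield the listening socket; the server is killed on exit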
server_kwargs = {
'protocol': SwiftHttpProtocol,
'capitalize_response_headers': False,
}
server_kwargs.update(extra_server_kwargs)
prolis = listen_zero()
try:
proxy_thread = spawn(wsgi.server, prolis, prosrv,
prosrv.logger, **server_kwargs)
yield prolis
finally:
proxy_thread.kill()
class BaseTestECObjectController(BaseTestObjectController):
def test_PUT_ec(self):
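        # a small (single-stripe) EC PUT should land one distinct
        # fragment archive per node, carry the expected EC sysmeta, and
        # write at least two durable data files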
self.put_container(self.ec_policy.name, self.ec_policy.name)
obj = b'abCD' * 10 # small, so we don't get multiple EC stripes
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s/o1 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: "%s"\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (self.ec_policy.name,
md5(obj, usedforsecurity=False).hexdigest(),
len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
ecd = self.ec_policy.pyeclib_driver
expected_pieces = set(ecd.encode(obj))
# go to disk to make sure it's there and all erasure-coded
partition, nodes = self.ec_policy.object_ring.get_nodes(
'a', self.ec_policy.name, 'o1')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, debug_logger())[self.ec_policy]
got_pieces = set()
got_indices = set()
got_durable = []
for node_index, node in enumerate(nodes):
df = df_mgr.get_diskfile(node['device'], partition,
'a', self.ec_policy.name, 'o1',
policy=self.ec_policy)
with df.open():
meta = df.get_metadata()
contents = b''.join(df.reader())
got_pieces.add(contents)
lmeta = dict((k.lower(), v) for k, v in meta.items())
got_indices.add(
lmeta['x-object-sysmeta-ec-frag-index'])
self.assertEqual(
lmeta['x-object-sysmeta-ec-etag'],
md5(obj, usedforsecurity=False).hexdigest())
self.assertEqual(
lmeta['x-object-sysmeta-ec-content-length'],
str(len(obj)))
self.assertEqual(
lmeta['x-object-sysmeta-ec-segment-size'],
'4096')
self.assertEqual(
lmeta['x-object-sysmeta-ec-scheme'],
'%s 2+1' % DEFAULT_TEST_EC_TYPE)
self.assertEqual(
lmeta['etag'],
md5(contents, usedforsecurity=False).hexdigest())
# check presence for a durable data file for the timestamp
durable_file = (
utils.Timestamp(df.timestamp).internal +
'#%s' % lmeta['x-object-sysmeta-ec-frag-index'] +
'#d.data')
durable_file = os.path.join(
_testdir, node['device'], storage_directory(
diskfile.get_data_dir(self.ec_policy),
partition, hash_path('a', self.ec_policy.name, 'o1')),
durable_file)
if os.path.isfile(durable_file):
got_durable.append(True)
self.assertEqual(expected_pieces, got_pieces)
self.assertEqual(set(('0', '1', '2')), got_indices)
        # verify at least 2 puts made it all the way to the end of 2nd
        # phase, i.e. at least 2 durable data files were written
num_durable_puts = sum(d is True for d in got_durable)
self.assertGreaterEqual(num_durable_puts, 2)
def test_PUT_ec_multiple_segments(self):
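        # an EC PUT spanning several segments should produce fragment
        # archives of the expected length whose fragments decode back to
        # the original object segments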
self.put_container(self.ec_policy.name, self.ec_policy.name)
pyeclib_header_size = len(self.ec_policy.pyeclib_driver.encode(b"")[0])
segment_size = self.ec_policy.ec_segment_size
# Big enough to have multiple segments. Also a multiple of the
# segment size to get coverage of that path too.
obj = b'ABC' * segment_size
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s/o2 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (self.ec_policy.name, len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# it's a 2+1 erasure code, so each fragment archive should be half
# the length of the object, plus three inline pyeclib metadata
# things (one per segment)
expected_length = (len(obj) // 2 + pyeclib_header_size * 3)
partition, nodes = self.ec_policy.object_ring.get_nodes(
'a', self.ec_policy.name, 'o2')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, debug_logger())[self.ec_policy]
got_durable = []
fragment_archives = []
for node in nodes:
df = df_mgr.get_diskfile(
node['device'], partition, 'a',
self.ec_policy.name, 'o2', policy=self.ec_policy)
with df.open():
meta = df.get_metadata()
contents = b''.join(df.reader())
fragment_archives.append(contents)
self.assertEqual(len(contents), expected_length)
durable_file = (
utils.Timestamp(df.timestamp).internal +
'#%s' % meta['X-Object-Sysmeta-Ec-Frag-Index'] +
'#d.data')
durable_file = os.path.join(
_testdir, node['device'], storage_directory(
diskfile.get_data_dir(self.ec_policy),
partition, hash_path('a', self.ec_policy.name, 'o2')),
durable_file)
if os.path.isfile(durable_file):
got_durable.append(True)
# Verify that we can decode each individual fragment and that they
# are all the correct size
fragment_size = self.ec_policy.fragment_size
nfragments = int(
math.ceil(float(len(fragment_archives[0])) / fragment_size))
for fragment_index in range(nfragments):
fragment_start = fragment_index * fragment_size
fragment_end = (fragment_index + 1) * fragment_size
try:
frags = [fa[fragment_start:fragment_end]
for fa in fragment_archives]
seg = self.ec_policy.pyeclib_driver.decode(frags)
except ECDriverError:
self.fail("Failed to decode fragments %d; this probably "
"means the fragments are not the sizes they "
"should be" % fragment_index)
segment_start = fragment_index * segment_size
segment_end = (fragment_index + 1) * segment_size
self.assertEqual(seg, obj[segment_start:segment_end])
        # verify at least 2 puts made it all the way to the end of 2nd
        # phase, i.e. at least 2 durable data files were written
num_durable_puts = sum(d is True for d in got_durable)
self.assertGreaterEqual(num_durable_puts, 2)
def test_PUT_ec_object_etag_mismatch(self):
self.put_container(self.ec_policy.name, self.ec_policy.name)
obj = b'90:6A:02:60:B1:08-96da3e706025537fc42464916427727e'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s/o3 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (self.ec_policy.name,
md5(b'something else',
usedforsecurity=False).hexdigest(),
len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 422'
self.assertEqual(headers[:len(exp)], exp)
# nothing should have made it to disk on the object servers
partition, nodes = prosrv.get_object_ring(
int(self.ec_policy)).get_nodes('a', self.ec_policy.name, 'o3')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, debug_logger())[self.ec_policy]
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', self.ec_policy.name, 'o3',
policy=self.ec_policy)
self.assertRaises(DiskFileNotExist, df.open)
def test_PUT_ec_fragment_archive_etag_mismatch(self):
self.put_container(self.ec_policy.name, self.ec_policy.name)
        # Cause a hash mismatch by feeding one particular MD5 hasher some
        # extra data. The goal is to corrupt just enough of the object
        # servers' fragment hashers (replica count minus ndata) that the
        # PUT cannot reach quorum.
count = (
self.ec_policy.object_ring.replica_count - self.ec_policy.ec_ndata)
countdown = [count]
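        # a single-element list lets the closure below decrement the counter
        # without needing nonlocal (which py2 lacks)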
def busted_md5_constructor(initial_str=b"", usedforsecurity=True):
hasher = md5(initial_str, usedforsecurity=usedforsecurity)
if countdown[0] > 0:
hasher.update(b'wrong')
countdown[0] -= 1
return hasher
obj = b'uvarovite-esurience-cerated-symphysic'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
with mock.patch('swift.obj.diskfile.md5',
busted_md5_constructor):
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s/pimento HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (self.ec_policy.name,
md5(obj, usedforsecurity=False).hexdigest(),
len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 503' # no quorum
self.assertEqual(headers[:len(exp)], exp)
        # only ec_ndata of the fragment archives should have
        # landed on disk
partition, nodes = prosrv.get_object_ring(
int(self.ec_policy)).get_nodes('a', self.ec_policy.name, 'pimento')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, debug_logger())[self.ec_policy]
found = 0
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', self.ec_policy.name, 'pimento',
policy=self.ec_policy)
try:
# diskfile open won't succeed because no durable was written,
# so look under the hood for data files.
files = os.listdir(df._datadir)
if len(files) > 0:
                    # Although the third fragment archive hasn't landed on
                    # disk, the directory df._datadir is prematurely created
                    # and is empty when we use O_TMPFILE + linkat()
num_data_files = \
len([f for f in files if f.endswith('.data')])
self.assertEqual(1, num_data_files)
found += 1
except OSError:
pass
self.assertEqual(found, self.ec_policy.ec_ndata)
def test_PUT_ec_fragment_quorum_archive_etag_mismatch(self):
self.put_container("ec", "ec-con")
def busted_md5_constructor(initial_str=b"", usedforsecurity=True):
hasher = md5(initial_str, usedforsecurity=usedforsecurity)
hasher.update(b'wrong')
return hasher
obj = b'uvarovite-esurience-cerated-symphysic'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
call_count = [0]
def mock_committer(self):
call_count[0] += 1
commit_confirmation = \
'swift.proxy.controllers.obj.MIMEPutter.send_commit_confirmation'
diskfile_md5 = 'swift.obj.diskfile.md5'
mem_diskfile_md5 = 'swift.obj.mem_diskfile.md5'
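        # corrupt every object server's fragment hasher and count any commit
        # confirmations the proxy tries to send; with all etags mismatching
        # there should be no quorum and therefore no commit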
with mock.patch(diskfile_md5, busted_md5_constructor), \
mock.patch(mem_diskfile_md5, busted_md5_constructor), \
mock.patch(commit_confirmation, mock_committer):
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/ec-con/quorum HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (md5(obj, usedforsecurity=False).hexdigest(),
len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 503' # no quorum
self.assertEqual(headers[:len(exp)], exp)
        # the proxy should not send a commit to the object-servers when the
        # quorum of responses consists of 4xx errors
self.assertEqual(0, call_count[0])
# no fragment archives should have landed on disk
partition, nodes = prosrv.get_object_ring(3).get_nodes(
'a', 'ec-con', 'quorum')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, debug_logger())[self.ec_policy]
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'quorum',
policy=self.ec_policy)
if os.path.exists(df._datadir):
self.assertFalse(os.listdir(df._datadir)) # should be empty
def test_PUT_ec_fragment_quorum_bad_request(self):
self.put_container("ec", "ec-con")
obj = b'uvarovite-esurience-cerated-symphysic'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
call_count = [0]
def mock_committer(self):
call_count[0] += 1
read_footer = \
'swift.obj.server.ObjectController._read_metadata_footer'
commit_confirmation = \
'swift.proxy.controllers.obj.MIMEPutter.send_commit_confirmation'
with mock.patch(read_footer) as read_footer_call, \
mock.patch(commit_confirmation, mock_committer):
# Emulate missing footer MIME doc in all object-servers
read_footer_call.side_effect = HTTPBadRequest(
body="couldn't find footer MIME doc")
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/ec-con/quorum HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (md5(obj, usedforsecurity=False).hexdigest(),
len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
        # The result of the failed conversation between proxy-server and
        # object-server should not reach the client; it just gets a 503
exp = b'HTTP/1.1 503'
self.assertEqual(headers[:len(exp)], exp)
        # the proxy should not send a commit to the object-servers when the
        # quorum of responses consists of 4xx errors
self.assertEqual(0, call_count[0])
# no fragment archives should have landed on disk
partition, nodes = prosrv.get_object_ring(3).get_nodes(
'a', 'ec-con', 'quorum')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, debug_logger())[self.ec_policy]
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'quorum',
policy=self.ec_policy)
if os.path.exists(df._datadir):
self.assertFalse(os.listdir(df._datadir)) # should be empty
def test_PUT_ec_if_none_match(self):
self.put_container(self.ec_policy.name, self.ec_policy.name)
obj = b'ananepionic-lepidophyllous-ropewalker-neglectful'
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s/inm HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: "%s"\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (self.ec_policy.name,
md5(obj, usedforsecurity=False).hexdigest(),
len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s/inm HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'If-None-Match: *\r\n'
'Etag: "%s"\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (self.ec_policy.name,
md5(obj, usedforsecurity=False).hexdigest(),
len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
def test_GET_ec(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
self.put_container(self.ec_policy.name, self.ec_policy.name)
obj = b'0123456' * 11 * 17
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s/go-get-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'X-Object-Meta-Color: chartreuse\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (self.ec_policy.name, len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('GET /v1/a/%s/go-get-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.ec_policy.name).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(
md5(obj, usedforsecurity=False).hexdigest(),
headers['Etag'])
self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
gotten_obj = b''
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
self.assertEqual(gotten_obj, obj)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
def test_GET_ec_deleted(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
container_name = 'ec_deleted'
self.put_container(self.ec_policy.name, container_name)
# delete container
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('DELETE /v1/a/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'\r\n' % container_name).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 20'
self.assertEqual(headers[:len(exp)], exp)
# download from deleted container
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('GET /v1/a/%s/no-object-there HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % container_name).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
def test_conditional_GET_ec(self):
# sanity
self.assertEqual('erasure_coding', self.ec_policy.policy_type)
self._test_conditional_GET(self.ec_policy)
def test_GET_ec_big(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
self.put_container(self.ec_policy.name, self.ec_policy.name)
# our EC segment size is 4 KiB, so this is multiple (3) segments;
# we'll verify that with a sanity check
obj = b'a moose once bit my sister' * 400
self.assertGreater(
len(obj), self.ec_policy.ec_segment_size * 2,
"object is too small for proper testing")
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s/big-obj-get HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (self.ec_policy.name, len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('GET /v1/a/%s/big-obj-get HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.ec_policy.name).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(
md5(obj, usedforsecurity=False).hexdigest(),
headers['Etag'])
gotten_obj = b''
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
# This may look like a redundant test, but when things fail, this
# has a useful failure message while the subsequent one spews piles
# of garbage and demolishes your terminal's scrollback buffer.
self.assertEqual(len(gotten_obj), len(obj))
self.assertEqual(gotten_obj, obj)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
def test_GET_ec_failure_handling(self):
self.put_container(self.ec_policy.name, self.ec_policy.name)
obj = b'look at this object; it is simply amazing ' * 500
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s/crash-test-dummy HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (self.ec_policy.name, len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
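        # wrap the proxy's ECAppIter inputs so that each document part blows
        # up shortly after it starts yielding, simulating backends that die
        # mid-stream once the response has already begun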
def explodey_iter(inner_iter):
yield next(inner_iter)
raise Exception("doom ba doom")
def explodey_doc_parts_iter(inner_iter_iter):
try:
for item in inner_iter_iter:
item = item.copy() # paranoia about mutable data
item['part_iter'] = explodey_iter(item['part_iter'])
yield item
except GeneratorExit:
inner_iter_iter.close()
raise
real_ec_app_iter = swift.proxy.controllers.obj.ECAppIter
def explodey_ec_app_iter(path, policy, iterators, *a, **kw):
# Each thing in `iterators` here is a document-parts iterator,
# and we want to fail after getting a little into each part.
#
# That way, we ensure we've started streaming the response to
# the client when things go wrong.
return real_ec_app_iter(
path, policy,
[explodey_doc_parts_iter(i) for i in iterators],
*a, **kw)
with mock.patch("swift.proxy.controllers.obj.ECAppIter",
explodey_ec_app_iter):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('GET /v1/a/%s/crash-test-dummy HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.ec_policy.name).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(
md5(obj, usedforsecurity=False).hexdigest(),
headers['Etag'])
gotten_obj = b''
try:
# don't hang the test run when this fails
with Timeout(300):
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
except Timeout:
self.fail("GET hung when connection failed")
# Ensure we failed partway through, otherwise the mocks could
# get out of date without anyone noticing
self.assertTrue(0 < len(gotten_obj) < len(obj))
def test_HEAD_ec(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
self.put_container(self.ec_policy.name, self.ec_policy.name)
obj = b'0123456' * 11 * 17
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s/go-head-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'X-Object-Meta-Color: chartreuse\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (self.ec_policy.name, len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('HEAD /v1/a/%s/go-head-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.ec_policy.name).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(
md5(obj, usedforsecurity=False).hexdigest(),
headers['Etag'])
self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
def test_GET_ec_404(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
self.put_container(self.ec_policy.name, self.ec_policy.name)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('GET /v1/a/%s/yes-we-have-no-bananas HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.ec_policy.name).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
def test_HEAD_ec_404(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
self.put_container(self.ec_policy.name, self.ec_policy.name)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('HEAD /v1/a/%s/yes-we-have-no-bananas HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.ec_policy.name).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
def test_reload_ring_ec(self):
self.put_container("ec", "ec-con")
orig_rtime = self.ec_policy.object_ring._rtime
orig_replica_count = self.ec_policy.object_ring.replica_count
        # save the original ring file as a backup
copyfile(self.ec_policy.object_ring.serialized_path,
self.ec_policy.object_ring.serialized_path + '.bak')
try:
            # overwrite with a 2-replica, 2-device ring
obj_devs = []
obj_devs.append(
{'port': _test_sockets[-3].getsockname()[1],
'device': 'sdg1'})
obj_devs.append(
{'port': _test_sockets[-2].getsockname()[1],
'device': 'sdh1'})
write_fake_ring(self.ec_policy.object_ring.serialized_path,
*obj_devs)
def get_ring_reloaded_response(method):
                # force the ring to be reloaded on the next request
self.ec_policy.object_ring._rtime = 0
trans_data = ['%s /v1/a/ec-con/o2 HTTP/1.1\r\n' % method,
'Host: localhost\r\n',
'Connection: close\r\n',
'X-Storage-Token: t\r\n']
if method == 'PUT':
# small, so we don't get multiple EC stripes
obj = b'abCD' * 10
extra_trans_data = [
'Etag: "%s"\r\n' % md5(
obj, usedforsecurity=False).hexdigest(),
'Content-Length: %d\r\n' % len(obj),
'Content-Type: application/octet-stream\r\n',
'\r\n%s' % obj.decode('ascii')
]
trans_data.extend(extra_trans_data)
else:
trans_data.append('\r\n')
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(''.join(trans_data).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
                # callers verify the older ring is still in use; the ring
                # file itself is rolled back in the finally block
return headers
for method in ('PUT', 'HEAD', 'GET', 'POST', 'DELETE'):
headers = get_ring_reloaded_response(method)
exp = b'HTTP/1.1 20'
self.assertEqual(headers[:len(exp)], exp)
                # the proxy did not load the newest ring; it still uses
                # the older one
self.assertEqual(orig_replica_count,
self.ec_policy.object_ring.replica_count)
if method == 'POST':
headers = get_ring_reloaded_response(method)
exp = b'HTTP/1.1 20'
self.assertEqual(headers[:len(exp)], exp)
# sanity
self.assertEqual(orig_replica_count,
self.ec_policy.object_ring.replica_count)
finally:
self.ec_policy.object_ring._rtime = orig_rtime
os.rename(self.ec_policy.object_ring.serialized_path + '.bak',
self.ec_policy.object_ring.serialized_path)
def test_ec_client_disconnect(self):
prolis = _test_sockets[0]
# create connection
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
# create container
fd.write(('PUT /v1/a/%s-discon HTTP/1.1\r\n'
'Host: localhost\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: %s\r\n'
'\r\n' % (self.ec_policy.name,
self.ec_policy.name)).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 2'
self.assertEqual(headers[:len(exp)], exp)
# create object
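        # 256 KiB of data: large enough to span several EC segments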
obj = b'a' * 4 * 64 * 2 ** 10
fd.write(('PUT /v1/a/%s-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n' % (self.ec_policy.name, len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
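        # record every WatchdogTimeout the proxy enters so we can assert
        # below that each one was exited, i.e. no writer coroutine was left
        # stuck after the client disconnected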
class WrappedTimeout(utils.WatchdogTimeout):
def __enter__(self):
timeouts[self] = traceback.extract_stack()
return super(WrappedTimeout, self).__enter__()
def __exit__(self, typ, value, tb):
timeouts[self] = None
return super(WrappedTimeout, self).__exit__(typ, value, tb)
timeouts = {}
with mock.patch('swift.proxy.controllers.obj.WatchdogTimeout',
WrappedTimeout):
with mock.patch.object(_test_servers[0], 'client_timeout', new=5):
# get object
fd.write(('GET /v1/a/%s-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.ec_policy.name).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
                # read just a little of the object, then disconnect
fd.read(10)
if six.PY2:
sock.fd._sock.close()
else:
sock.fd._real_close()
self._sleep_enough(
lambda:
_test_servers[0].logger.get_lines_for_level('warning'))
# check for disconnect message!
expected = [
"Client disconnected on read of EC frag '/a/%s-discon/test'"
% self.ec_policy.name] * 2
self.assertEqual(
_test_servers[0].logger.get_lines_for_level('warning'),
expected)
# check that no coro was left waiting to write
self.assertTrue(timeouts) # sanity - WrappedTimeout did get called
missing_exits = [tb for tb in timeouts.values() if tb is not None]
self.assertFalse(
missing_exits, 'Failed to exit all ChunkWriteTimeouts.\n' +
''.join(['No exit from ChunkWriteTimeout entered at:\n' +
''.join(traceback.format_list(tb)[:-1])
for tb in missing_exits]))
# and check that the ChunkWriteTimeouts did not raise Exceptions
self.assertFalse(_test_servers[0].logger.get_lines_for_level('error'))
def test_ec_client_put_disconnect(self):
prolis = _test_sockets[0]
# create connection
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
# create container
fd.write(('PUT /v1/a/%s-discon HTTP/1.1\r\n'
'Host: localhost\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: %s\r\n'
'\r\n' % (self.ec_policy.name,
self.ec_policy.name)).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 2'
self.assertEqual(headers[:len(exp)], exp)
# create object
obj = b'a' * 4 * 64 * 2 ** 10
fd.write(('PUT /v1/a/%s-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n' % (self.ec_policy.name, len(obj))).encode('ascii'))
fd.write(obj[:-10])
fd.flush()
fd.close()
sock.close()
# sleep to trampoline enough
condition = \
lambda: _test_servers[0].logger.get_lines_for_level('warning')
self._sleep_enough(condition)
expected = ['Client disconnected without sending enough data']
warns = _test_servers[0].logger.get_lines_for_level('warning')
self.assertEqual(expected, warns)
errors = _test_servers[0].logger.get_lines_for_level('error')
self.assertEqual([], errors)
# try it chunked
_test_servers[0].logger.clear()
chunk = 'a' * 64 * 2 ** 10
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Transfer-Encoding: chunked\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n' % (self.ec_policy.name,)).encode('ascii'))
fd.write(('%x\r\n%s\r\n' % (len(chunk), chunk)).encode('ascii'))
# no zero-byte end chunk
fd.flush()
fd.close()
sock.close()
# sleep to trampoline enough
condition = \
lambda: _test_servers[0].logger.get_lines_for_level('warning')
self._sleep_enough(condition)
expected = ['Client disconnected without sending last chunk']
warns = _test_servers[0].logger.get_lines_for_level('warning')
self.assertEqual(expected, warns)
errors = _test_servers[0].logger.get_lines_for_level('error')
self.assertEqual([], errors)
_test_servers[0].logger.clear()
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Transfer-Encoding: chunked\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n' % (self.ec_policy.name,)).encode('ascii'))
fd.write(('%x\r\n%s\r\n' % (len(chunk), chunk)).encode('ascii')[:-10])
fd.flush()
fd.close()
sock.close()
# sleep to trampoline enough
condition = \
lambda: _test_servers[0].logger.get_lines_for_level('warning')
self._sleep_enough(condition)
expected = ['Client disconnected without sending last chunk']
warns = _test_servers[0].logger.get_lines_for_level('warning')
self.assertEqual(expected, warns)
errors = _test_servers[0].logger.get_lines_for_level('error')
self.assertEqual([], errors)
_test_servers[0].logger.clear()
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Transfer-Encoding: chunked\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n' % (self.ec_policy.name,)).encode('ascii'))
fd.write(('%x\r\n' % len(chunk)).encode('ascii'))
fd.flush()
fd.close()
sock.close()
# sleep to trampoline enough
condition = \
lambda: _test_servers[0].logger.get_lines_for_level('warning')
self._sleep_enough(condition)
expected = ['Client disconnected without sending last chunk']
warns = _test_servers[0].logger.get_lines_for_level('warning')
self.assertEqual(expected, warns)
errors = _test_servers[0].logger.get_lines_for_level('error')
self.assertEqual([], errors)
        # Do a valid PUT with conflicting headers (chunked transfer encoding
        # plus an absurd Content-Length)
_test_servers[0].logger.clear()
chunk = 'a' * 64 * 2 ** 10
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Transfer-Encoding: chunked\r\n'
'Content-Length: 999999999999999999999999\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n' % (self.ec_policy.name,)).encode('ascii'))
fd.write(('%x\r\n%s\r\n0\r\n\r\n' % (
len(chunk), chunk)).encode('ascii'))
        # this request does send the zero-byte end chunk, so the PUT succeeds
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
fd.close()
sock.close()
warns = _test_servers[0].logger.get_lines_for_level('warning')
self.assertEqual([], warns)
errors = _test_servers[0].logger.get_lines_for_level('error')
self.assertEqual([], errors)
class TestECObjectController(BaseTestECObjectController, unittest.TestCase):
def setUp(self):
skip_if_no_xattrs()
self.policy = self.ec_policy = POLICIES[3]
super(TestECObjectController, self).setUp()
class TestECDuplicationObjectController(
BaseTestECObjectController, unittest.TestCase):
def setUp(self):
skip_if_no_xattrs()
self.policy = self.ec_policy = POLICIES[4]
super(TestECDuplicationObjectController, self).setUp()
class TestECMismatchedFA(unittest.TestCase):
def setUp(self):
skip_if_no_xattrs()
def tearDown(self):
prosrv = _test_servers[0]
# don't leak error limits and poison other tests
prosrv.error_limiter.stats.clear()
def test_mixing_different_objects_fragment_archives(self):
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
obj2srv, obj3srv, obj4srv, obj5srv, obj6srv) = _test_servers
ec_policy = POLICIES[3]
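        # a stand-in controller method that makes an object server behave as
        # if its disk has failed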
@public
def bad_disk(req):
return Response(status=507, body="borken")
ensure_container = Request.blank(
"/v1/a/ec-crazytown",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Storage-Policy": "ec", "X-Auth-Token": "t"})
resp = ensure_container.get_response(prosrv)
self.assertIn(resp.status_int, (201, 202))
obj1 = "first version..."
put_req1 = Request.blank(
"/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Auth-Token": "t"})
put_req1.body = obj1
obj2 = u"versión segundo".encode("utf-8")
put_req2 = Request.blank(
"/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Auth-Token": "t"})
put_req2.body = obj2
# pyeclib has checks for unequal-length; we don't want to trip those
self.assertEqual(len(obj1), len(obj2))
# Server obj1 will have the first version of the object (obj2 also
# gets it, but that gets stepped on later)
prosrv.error_limiter.stats.clear()
with mock.patch.object(obj3srv, 'PUT', bad_disk), \
mock.patch(
'swift.common.storage_policy.ECStoragePolicy.quorum'):
type(ec_policy).quorum = mock.PropertyMock(return_value=2)
resp = put_req1.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# Servers obj2 and obj3 will have the second version of the object.
prosrv.error_limiter.stats.clear()
with mock.patch.object(obj1srv, 'PUT', bad_disk), \
mock.patch(
'swift.common.storage_policy.ECStoragePolicy.quorum'):
type(ec_policy).quorum = mock.PropertyMock(return_value=2)
resp = put_req2.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# A GET that only sees 1 fragment archive should fail
get_req = Request.blank("/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv.error_limiter.stats.clear()
with mock.patch.object(obj1srv, 'GET', bad_disk), \
mock.patch.object(obj2srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 503)
# A GET that sees 2 matching FAs will work
get_req = Request.blank("/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv.error_limiter.stats.clear()
with mock.patch.object(obj1srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, obj2)
# A GET that sees 2 mismatching FAs will fail
get_req = Request.blank("/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv.error_limiter.stats.clear()
with mock.patch.object(obj2srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 503)
def test_mixing_different_objects_fragment_archives_with_dup_factor(self):
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
obj2srv, obj3srv, obj4srv, obj5srv, obj6srv) = _test_servers
ec_policy = POLICIES[4]
@public
def bad_disk(req):
return Response(status=507, body="borken")
ensure_container = Request.blank(
"/v1/a/ec-dup-crazytown",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Storage-Policy": "ec-dup", "X-Auth-Token": "t"})
resp = ensure_container.get_response(prosrv)
self.assertIn(resp.status_int, (201, 202))
obj1 = "first version..."
put_req1 = Request.blank(
"/v1/a/ec-dup-crazytown/obj",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Auth-Token": "t"})
put_req1.body = obj1
obj2 = u"versión segundo".encode("utf-8")
put_req2 = Request.blank(
"/v1/a/ec-dup-crazytown/obj",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Auth-Token": "t"})
put_req2.body = obj2
# pyeclib has checks for unequal-length; we don't want to trip those
self.assertEqual(len(obj1), len(obj2))
# First subset of object server will have the first version of the
# object
prosrv.error_limiter.stats.clear()
with mock.patch.object(obj4srv, 'PUT', bad_disk), \
mock.patch.object(obj5srv, 'PUT', bad_disk), \
mock.patch.object(obj6srv, 'PUT', bad_disk), \
mock.patch(
'swift.common.storage_policy.ECStoragePolicy.quorum'):
type(ec_policy).quorum = mock.PropertyMock(return_value=3)
resp = put_req1.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# Second subset will have the second version of the object.
prosrv.error_limiter.stats.clear()
with mock.patch.object(obj1srv, 'PUT', bad_disk), \
mock.patch.object(obj2srv, 'PUT', bad_disk), \
mock.patch.object(obj3srv, 'PUT', bad_disk), \
mock.patch(
'swift.common.storage_policy.ECStoragePolicy.quorum'):
type(ec_policy).quorum = mock.PropertyMock(return_value=3)
resp = put_req2.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# A GET that only sees 1 fragment archive should fail
get_req = Request.blank("/v1/a/ec-dup-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv.error_limiter.stats.clear()
with mock.patch.object(obj2srv, 'GET', bad_disk), \
mock.patch.object(obj3srv, 'GET', bad_disk), \
mock.patch.object(obj4srv, 'GET', bad_disk), \
mock.patch.object(obj5srv, 'GET', bad_disk), \
mock.patch.object(obj6srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 503)
# A GET that sees 2 matching FAs will work
get_req = Request.blank("/v1/a/ec-dup-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv.error_limiter.stats.clear()
with mock.patch.object(obj1srv, 'GET', bad_disk), \
mock.patch.object(obj2srv, 'GET', bad_disk), \
mock.patch.object(obj3srv, 'GET', bad_disk), \
mock.patch.object(obj6srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, obj2)
# A GET that sees 2 mismatching FAs will fail
get_req = Request.blank("/v1/a/ec-dup-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv.error_limiter.stats.clear()
with mock.patch.object(obj2srv, 'GET', bad_disk), \
mock.patch.object(obj3srv, 'GET', bad_disk), \
mock.patch.object(obj4srv, 'GET', bad_disk), \
mock.patch.object(obj6srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 503)
class TestECGets(unittest.TestCase):
def setUp(self):
super(TestECGets, self).setUp()
skip_if_no_xattrs()
self.tempdir = mkdtemp()
def tearDown(self):
rmtree(self.tempdir, ignore_errors=True)
prosrv = _test_servers[0]
# don't leak error limits and poison other tests
prosrv.error_limiter.stats.clear()
super(TestECGets, self).tearDown()
def _setup_nodes_and_do_GET(self, objs, node_state):
"""
A helper method that creates object fragments, stashes them in temp
dirs, and then moves selected fragments back into the hash_dirs on each
node according to a specified desired node state description.
:param objs: a dict that maps object references to dicts that describe
the object timestamp and content. Object frags will be
created for each item in this dict.
:param node_state: a dict that maps a node index to the desired state
for that node. Each desired state is a list of
dicts, with each dict describing object reference,
frag_index and whether the file moved to the node's
hash_dir should be marked as durable or not, or
converted to a meta file.
"""
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
obj2srv, obj3srv, _obj4srv, _obj5srv, _obj6srv) = _test_servers
ec_policy = POLICIES[3]
container_name = uuid.uuid4().hex
obj_name = uuid.uuid4().hex
obj_path = os.path.join(os.sep, 'v1', 'a', container_name, obj_name)
# PUT container, make sure it worked
container_path = os.path.join(os.sep, 'v1', 'a', container_name)
ec_container = Request.blank(
container_path, environ={"REQUEST_METHOD": "PUT"},
headers={"X-Storage-Policy": "ec", "X-Auth-Token": "t"})
resp = ec_container.get_response(prosrv)
self.assertIn(resp.status_int, (201, 202))
partition, nodes = \
ec_policy.object_ring.get_nodes('a', container_name, obj_name)
# map nodes to hash dirs
node_hash_dirs = {}
node_tmp_dirs = collections.defaultdict(dict)
for node in nodes:
node_hash_dirs[node['index']] = os.path.join(
_testdir, node['device'], storage_directory(
diskfile.get_data_dir(ec_policy),
partition, hash_path('a', container_name, obj_name)))
def _put_object(ref, timestamp, body):
# PUT an object and then move its disk files to a temp dir
headers = {"X-Timestamp": timestamp.internal}
put_req1 = Request.blank(obj_path, method='PUT', headers=headers)
put_req1.body = body
resp = put_req1.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# GET the obj, should work fine
get_req = Request.blank(obj_path, method="GET")
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
# move all hash dir files to per-node, per-obj tempdir
for node_index, hash_dir in node_hash_dirs.items():
node_tmp_dirs[node_index][ref] = mkdtemp(dir=self.tempdir)
for f in os.listdir(hash_dir):
move(os.path.join(hash_dir, f),
os.path.join(node_tmp_dirs[node_index][ref], f))
for obj_ref, obj_info in objs.items():
_put_object(obj_ref, **obj_info)
# sanity check - all hash_dirs are empty and GET returns a 404
for hash_dir in node_hash_dirs.values():
self.assertFalse(os.listdir(hash_dir))
get_req = Request.blank(obj_path, method="GET")
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 404)
# node state is in form:
# {node_index: [{ref: object reference,
# frag_index: index,
# durable: True or False,
# meta: True or False}, ...],
# node_index: ...}
# meta takes precedence over durable
for node_index, state in node_state.items():
dest = node_hash_dirs[node_index]
for frag_info in state:
src = node_tmp_dirs[frag_info['frag_index']][frag_info['ref']]
src_files = os.listdir(src)
# sanity check, expect just a single .data file
self.assertFalse(src_files[1:])
dest_file = src_files[0]
if frag_info.get('meta', False):
# morph a data file into a meta file;
# note: a real meta file would not have content
dest_file = dest_file.replace(
'#%d#d.data' % frag_info['frag_index'], '.meta')
elif not frag_info.get('durable', False):
dest_file = dest_file.replace('#d', '')
move(os.path.join(src, src_files[0]),
os.path.join(dest, dest_file))
# do an object GET
get_req = Request.blank(obj_path, method='GET')
return get_req.get_response(prosrv)
def test_GET_with_missing_durables(self):
# verify object GET behavior when durable files are missing
ts_iter = make_timestamp_iter()
objs = {'obj1': dict(timestamp=next(ts_iter), body=b'body')}
# durable missing from 2/3 nodes
node_state = {
0: [dict(ref='obj1', frag_index=0, durable=True)],
1: [dict(ref='obj1', frag_index=1, durable=False)],
2: [dict(ref='obj1', frag_index=2, durable=False)]
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, objs['obj1']['body'])
# all files missing on 1 node, durable missing from 1/2 other nodes
# durable missing from 2/3 nodes
node_state = {
0: [dict(ref='obj1', frag_index=0, durable=True)],
1: [],
2: [dict(ref='obj1', frag_index=2, durable=False)]
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, objs['obj1']['body'])
# durable missing from all 3 nodes
node_state = {
0: [dict(ref='obj1', frag_index=0, durable=False)],
1: [dict(ref='obj1', frag_index=1, durable=False)],
2: [dict(ref='obj1', frag_index=2, durable=False)]
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 404)
def test_GET_with_multiple_frags_per_node(self):
# verify object GET behavior when multiple fragments are on same node
ts_iter = make_timestamp_iter()
objs = {'obj1': dict(timestamp=next(ts_iter), body=b'body')}
# scenario: only two frags, both on same node
node_state = {
0: [],
1: [dict(ref='obj1', frag_index=0, durable=True),
dict(ref='obj1', frag_index=1, durable=False)],
2: []
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, objs['obj1']['body'])
# scenario: all 3 frags on same node
node_state = {
0: [],
1: [dict(ref='obj1', frag_index=0, durable=True),
dict(ref='obj1', frag_index=1, durable=False),
dict(ref='obj1', frag_index=2, durable=False)],
2: []
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, objs['obj1']['body'])
def test_GET_with_multiple_timestamps_on_nodes(self):
ts_iter = make_timestamp_iter()
ts_1, ts_2, ts_3 = [next(ts_iter) for _ in range(3)]
objs = {'obj1': dict(timestamp=ts_1, body=b'body1'),
'obj2': dict(timestamp=ts_2, body=b'body2'),
'obj3': dict(timestamp=ts_3, body=b'body3')}
# newer non-durable frags do not prevent proxy getting the durable obj1
node_state = {
0: [dict(ref='obj3', frag_index=0, durable=False),
dict(ref='obj2', frag_index=0, durable=False),
dict(ref='obj1', frag_index=0, durable=True)],
1: [dict(ref='obj3', frag_index=1, durable=False),
dict(ref='obj2', frag_index=1, durable=False),
dict(ref='obj1', frag_index=1, durable=True)],
2: [dict(ref='obj3', frag_index=2, durable=False),
dict(ref='obj2', frag_index=2, durable=False),
dict(ref='obj1', frag_index=2, durable=True)],
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, objs['obj1']['body'])
self.assertEqual(ts_1.normal, resp.headers['X-Timestamp'])
# durable frags at two timestamps: in this scenario proxy is guaranteed
# to see the durable at ts_2 with one of the first 2 responses, so will
# then prefer that when requesting from third obj server
node_state = {
0: [dict(ref='obj3', frag_index=0, durable=False),
dict(ref='obj2', frag_index=0, durable=False),
dict(ref='obj1', frag_index=0, durable=True)],
1: [dict(ref='obj3', frag_index=1, durable=False),
dict(ref='obj2', frag_index=1, durable=True)],
2: [dict(ref='obj3', frag_index=2, durable=False),
dict(ref='obj2', frag_index=2, durable=True)],
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, objs['obj2']['body'])
self.assertEqual(ts_2.normal, resp.headers['X-Timestamp'])
        # older durable, plus some newer non-durable, plus some even newer
        # metadata files; in this scenario the fragment X-Timestamp values
        # are determined by the metadata so we're checking that X-Timestamp
        # or X-Backend-Timestamp do *not* interfere with the proxy EC getter
        # response buckets, which should be based on X-Backend-Data-Timestamp
node_state = {
0: [dict(ref='obj3', frag_index=0, meta=True),
dict(ref='obj2', frag_index=0, durable=False),
dict(ref='obj1', frag_index=0, durable=True)],
1: [dict(ref='obj3', frag_index=1, meta=True),
dict(ref='obj1', frag_index=1, durable=True)],
2: [dict(ref='obj3', frag_index=2, meta=True),
dict(ref='obj2', frag_index=2, durable=False),
dict(ref='obj1', frag_index=2, durable=True)],
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, objs['obj1']['body'])
self.assertEqual(ts_3.normal, resp.headers['X-Timestamp'])
def test_GET_with_same_frag_index_on_multiple_nodes(self):
ts_iter = make_timestamp_iter()
# this is a trick to be able to get identical frags placed onto
# multiple nodes: since we cannot *copy* frags, we generate three sets
# of identical frags at same timestamp so we have enough to *move*
ts_1 = next(ts_iter)
objs = {'obj1a': dict(timestamp=ts_1, body=b'body'),
'obj1b': dict(timestamp=ts_1, body=b'body'),
'obj1c': dict(timestamp=ts_1, body=b'body')}
# arrange for duplicate frag indexes across nodes: because the object
# server prefers the highest available frag index, proxy will first get
# back two responses with frag index 1, and will then return to node 0
# for frag_index 0.
node_state = {
0: [dict(ref='obj1a', frag_index=0, durable=False),
dict(ref='obj1a', frag_index=1, durable=False)],
1: [dict(ref='obj1b', frag_index=1, durable=True)],
2: [dict(ref='obj1c', frag_index=1, durable=True)]
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, objs['obj1a']['body'])
# if all we have across nodes are frags with same index then expect a
# 404 (the third, 'extra', obj server GET will return 404 because it
# will be sent frag prefs that exclude frag_index 1)
node_state = {
0: [dict(ref='obj1a', frag_index=1, durable=False)],
1: [dict(ref='obj1b', frag_index=1, durable=False)],
2: [dict(ref='obj1c', frag_index=1, durable=False)]
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 404)
# if we know it should be durable, we can be more specific.
# note that we need to set *both* of those first ones durable
# to avoid a flaky test -- in the future we can be smarter and
# let the durability bubble up, even from a duplicate frag
node_state = {
0: [dict(ref='obj1a', frag_index=1, durable=True)],
1: [dict(ref='obj1b', frag_index=1, durable=True)],
2: [dict(ref='obj1c', frag_index=1, durable=False)]
}
resp = self._setup_nodes_and_do_GET(objs, node_state)
self.assertEqual(resp.status_int, 503)
class TestObjectDisconnectCleanup(unittest.TestCase):
    # update this pattern if do_setup ever creates devices with different
    # names
device_pattern = re.compile('sd[a-z][0-9]')
def _cleanup_devices(self):
# make sure all the object data is cleaned up
for dev in os.listdir(_testdir):
if not self.device_pattern.match(dev):
continue
device_path = os.path.join(_testdir, dev)
for datadir in os.listdir(device_path):
if any(p in datadir for p in ('account', 'container')):
continue
data_path = os.path.join(device_path, datadir)
rmtree(data_path, ignore_errors=True)
mkdirs(data_path)
def setUp(self):
skip_if_no_xattrs()
debug.hub_exceptions(False)
self._cleanup_devices()
_test_servers[0].error_limiter.stats.clear() # clear out errors
def tearDown(self):
debug.hub_exceptions(True)
self._cleanup_devices()
_test_servers[0].error_limiter.stats.clear() # clear out errors
def _check_disconnect_cleans_up(self, policy_name, is_chunked=False):
proxy_port = _test_sockets[0].getsockname()[1]
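        # minimal HTTP client: streams the body (optionally chunked) and then
        # force-closes the raw socket afterwards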
def put(path, headers=None, body=None):
conn = httplib.HTTPConnection('localhost', proxy_port)
try:
conn.connect()
conn.putrequest('PUT', path)
for k, v in (headers or {}).items():
conn.putheader(k, v)
conn.endheaders()
body = body or [b'']
for chunk in body:
if is_chunked:
chunk = b'%x\r\n%s\r\n' % (len(chunk), chunk)
conn.send(chunk)
resp = conn.getresponse()
body = resp.read()
finally:
# seriously - shut this mother down
if conn.sock:
if six.PY2:
conn.sock.fd._sock.close()
else:
conn.sock.fd._real_close()
return resp, body
# ensure container
container_path = '/v1/a/%s-disconnect-test' % policy_name
resp, _body = put(container_path, headers={
'Connection': 'close',
'X-Storage-Policy': policy_name,
'Content-Length': '0',
})
self.assertIn(resp.status, (201, 202))
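        # a request body generator that raises partway through, simulating a
        # client that disconnects mid-upload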
def exploding_body():
for i in range(3):
yield b'\x00' * (64 * 2 ** 10)
raise Exception('kaboom!')
headers = {}
if is_chunked:
headers['Transfer-Encoding'] = 'chunked'
else:
headers['Content-Length'] = 64 * 2 ** 20
obj_path = container_path + '/disconnect-data'
try:
resp, _body = put(obj_path, headers=headers,
body=exploding_body())
except Exception as e:
if str(e) != 'kaboom!':
raise
else:
self.fail('obj put connection did not ka-splod')
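        # give the server greenthreads a moment to notice the disconnect and
        # finish cleaning up before we inspect the on-disk state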
sleep(0.1)
def find_files(self):
found_files = defaultdict(list)
for root, dirs, files in os.walk(_testdir):
for fname in files:
filename, ext = os.path.splitext(fname)
found_files[ext].append(os.path.join(root, fname))
return found_files
def test_repl_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('zero')
found_files = self.find_files()
self.assertEqual(found_files['.data'], [])
def test_ec_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('ec')
found_files = self.find_files()
self.assertEqual(found_files['.data'], [])
def test_repl_chunked_transfer_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('zero', is_chunked=True)
found_files = self.find_files()
self.assertEqual(found_files['.data'], [])
def test_ec_chunked_transfer_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('ec', is_chunked=True)
found_files = self.find_files()
self.assertEqual(found_files['.data'], [])
class TestObjectECRangedGET(unittest.TestCase):
def setUp(self):
_test_servers[0].logger._clear()
_test_servers[0].error_limiter.stats.clear() # clear out errors
self.app = proxy_server.Application(
None,
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(),
container_ring=FakeRing())
def tearDown(self):
prosrv = _test_servers[0]
self.assertFalse(prosrv.logger.get_lines_for_level('error'))
self.assertFalse(prosrv.logger.get_lines_for_level('warning'))
prosrv.error_limiter.stats.clear() # clear out errors
@classmethod
def setUpClass(cls):
skip_if_no_xattrs()
cls.obj_name = 'range-get-test'
cls.tiny_obj_name = 'range-get-test-tiny'
cls.aligned_obj_name = 'range-get-test-aligned'
cls.zero_byte_obj_name = 'range-get-test-zero-byte'
# Note: only works if called with unpatched policies
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/ec-con HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: ec\r\n'
'\r\n').encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 2'
assert headers[:len(exp)] == exp, "container PUT failed"
seg_size = POLICIES.get_by_name("ec").ec_segment_size
cls.seg_size = seg_size
# EC segment size is 4 KiB, hence this gives 4 segments, which we
# then verify with a quick sanity check
cls.obj = ' my hovercraft is full of eels '.join(
str(s) for s in range(431)).encode('ascii')
assert seg_size * 4 > len(cls.obj) > seg_size * 3, \
"object is wrong number of segments"
cls.obj_etag = md5(cls.obj, usedforsecurity=False).hexdigest()
cls.tiny_obj = b'tiny, tiny object'
assert len(cls.tiny_obj) < seg_size, "tiny_obj too large"
cls.aligned_obj = "".join(
"abcdEFGHijkl%04d" % x for x in range(512)).encode('ascii')
assert len(cls.aligned_obj) % seg_size == 0, "aligned obj not aligned"
for obj_name, obj in ((cls.obj_name, cls.obj),
(cls.tiny_obj_name, cls.tiny_obj),
(cls.aligned_obj_name, cls.aligned_obj),
(cls.zero_byte_obj_name, b"")):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/ec-con/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n' % (obj_name, len(obj))).encode('ascii'))
fd.write(obj)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
assert headers[:len(exp)] == exp, \
"object PUT failed %s" % obj_name
def _get_obj(self, range_value, obj_name=None, ignore_range_if=''):
if obj_name is None:
obj_name = self.obj_name
if ignore_range_if:
ignore_range_if = (
'X-Backend-Ignore-Range-If-Metadata-Present: %s\r\n'
% ignore_range_if)
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write((
'GET /v1/a/ec-con/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Range: %s\r\n'
'%s'
'\r\n' % (obj_name, range_value, ignore_range_if)
).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
# e.g. "HTTP/1.1 206 Partial Content\r\n..."
status_code = int(headers[9:12])
headers = parse_headers_string(headers)
gotten_obj = b''
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
# if we get this wrong, clients will either get truncated data or
# they'll hang waiting for bytes that aren't coming, so it warrants
# being asserted for every test case
if 'Content-Length' in headers:
self.assertEqual(int(headers['Content-Length']), len(gotten_obj))
else:
self.assertIn('Transfer-Encoding', headers)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
# likewise, if we say MIME and don't send MIME or vice versa,
# clients will be horribly confused
if headers.get('Content-Type', '').startswith(
'multipart/byteranges'):
self.assertEqual(gotten_obj[:2], b"--")
else:
# In general, this isn't true, as you can start an object with
# "--". However, in this test, we don't start any objects with
# "--", or even include "--" in their contents anywhere.
self.assertNotEqual(gotten_obj[:2], b"--")
return (status_code, headers, gotten_obj)
def _parse_multipart(self, content_type, body):
if six.PY2:
parser = email.parser.FeedParser()
else:
parser = email.parser.BytesFeedParser()
if not isinstance(content_type, bytes):
content_type = content_type.encode('utf8')
parser.feed(b"Content-Type: %s\r\n\r\n" % content_type)
parser.feed(body)
root_message = parser.close()
self.assertTrue(root_message.is_multipart())
byteranges = root_message.get_payload()
self.assertFalse(root_message.defects)
for i, message in enumerate(byteranges):
self.assertFalse(message.defects, "Part %d had defects" % i)
self.assertFalse(message.is_multipart(),
"Nested multipart at %d" % i)
return byteranges
def test_bogus(self):
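        # an unrecognized range unit ("tacos") is ignored entirely, so the
        # whole object comes back with a 200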
status, headers, gotten_obj = self._get_obj("tacos=3-5")
self.assertEqual(status, 200)
self.assertEqual(len(gotten_obj), len(self.obj))
self.assertEqual(gotten_obj, self.obj)
def test_unaligned(self):
# One segment's worth of data, but straddling two segment boundaries
# (so it has data from three segments)
status, headers, gotten_obj = self._get_obj("bytes=3783-7878")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "4096")
self.assertEqual(headers['Content-Range'], "bytes 3783-7878/14513")
self.assertEqual(len(gotten_obj), 4096)
self.assertEqual(gotten_obj, self.obj[3783:7879])
def test_aligned_left(self):
# First byte is aligned to a segment boundary, last byte is not
status, headers, gotten_obj = self._get_obj("bytes=0-5500")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "5501")
self.assertEqual(headers['Content-Range'], "bytes 0-5500/14513")
self.assertEqual(len(gotten_obj), 5501)
self.assertEqual(gotten_obj, self.obj[:5501])
def test_aligned_range(self):
# Ranged GET that wants exactly one segment
status, headers, gotten_obj = self._get_obj("bytes=4096-8191")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "4096")
self.assertEqual(headers['Content-Range'], "bytes 4096-8191/14513")
self.assertEqual(len(gotten_obj), 4096)
self.assertEqual(gotten_obj, self.obj[4096:8192])
def test_aligned_range_end(self):
# Ranged GET that wants exactly the last segment
status, headers, gotten_obj = self._get_obj("bytes=12288-14512")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "2225")
self.assertEqual(headers['Content-Range'], "bytes 12288-14512/14513")
self.assertEqual(len(gotten_obj), 2225)
self.assertEqual(gotten_obj, self.obj[12288:])
def test_aligned_range_aligned_obj(self):
# Ranged GET that wants exactly the last segment, which is full-size
status, headers, gotten_obj = self._get_obj("bytes=4096-8191",
self.aligned_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "4096")
self.assertEqual(headers['Content-Range'], "bytes 4096-8191/8192")
self.assertEqual(len(gotten_obj), 4096)
self.assertEqual(gotten_obj, self.aligned_obj[4096:8192])
def test_ignore_range_if_metadata_present(self):
# Ranged GET that actually wants the whole object
status, headers, gotten_obj = self._get_obj(
"bytes=4096-8191", ignore_range_if='content-type')
self.assertEqual(status, 200)
self.assertEqual(headers['Content-Length'], str(len(self.obj)))
self.assertNotIn('Content-Range', headers)
self.assertEqual(len(gotten_obj), len(self.obj))
self.assertEqual(gotten_obj, self.obj)
def test_byte_0(self):
# Just the first byte, but it's index 0, so that's easy to get wrong
status, headers, gotten_obj = self._get_obj("bytes=0-0")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "1")
self.assertEqual(headers['Content-Range'], "bytes 0-0/14513")
self.assertEqual(gotten_obj, self.obj[0:1])
def test_unsatisfiable(self):
        # Goes just one byte too far off the end of the object, so it's
        # unsatisfiable. This should be close enough that the object servers
        # actually respond with a 206
obj_len = len(self.obj)
status, headers, _junk = self._get_obj(
"bytes=%d-%d" % (obj_len, obj_len + 100))
self.assertEqual(status, 416)
self.assertEqual(self.obj_etag, headers.get('Etag'))
self.assertEqual('bytes', headers.get('Accept-Ranges'))
self.assertIn('Content-Range', headers)
self.assertEqual('bytes */%d' % obj_len, headers['Content-Range'])
# Goes *way* too far off the end of the object, so we're looking at
# the (massaged) 416 from an object server
status, headers, _junk = self._get_obj(
"bytes=%d-" % (obj_len + 2 ** 30))
self.assertEqual(status, 416)
self.assertEqual(self.obj_etag, headers.get('Etag'))
self.assertEqual('bytes', headers.get('Accept-Ranges'))
self.assertIn('Content-Range', headers)
self.assertEqual('bytes */%d' % obj_len, headers['Content-Range'])
def test_unsatisfiable_socket_leak(self):
unclosed_http_responses = {}
tracked_responses = [0]
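        # subclass the buffered HTTP response so we can record which backend
        # responses were opened and never closed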
class LeakTrackingHTTPResponse(BufferedHTTPResponse):
def begin(self):
# no super(); we inherit from an old-style class (it's
# httplib's fault; don't try and fix it).
retval = BufferedHTTPResponse.begin(self)
if self.status != 204:
# This mock is overly broad and catches account and
# container HEAD requests too. We don't care about
# those; it's the object GETs that were leaky.
#
# Unfortunately, we don't have access to the request
# path here, so we use "status == 204" as a crude proxy
# for "not an object response".
unclosed_http_responses[id(self)] = self
tracked_responses[0] += 1
return retval
def close(self, *args, **kwargs):
rv = BufferedHTTPResponse.close(self, *args, **kwargs)
unclosed_http_responses.pop(id(self), None)
return rv
def __repr__(self):
swift_conn = getattr(self, 'swift_conn', None)
method = getattr(swift_conn, '_method', '<unknown>')
path = getattr(swift_conn, '_path', '<unknown>')
return '%s<method=%r path=%r>' % (
self.__class__.__name__, method, path)
obj_len = len(self.obj)
with mock.patch('swift.common.bufferedhttp.BufferedHTTPConnection'
'.response_class', LeakTrackingHTTPResponse):
status, headers, _junk = self._get_obj(
"bytes=%d-%d" % (obj_len, obj_len + 100))
self.assertEqual(status, 416) # sanity check
self.assertGreater(tracked_responses[0], 0) # ensure tracking happened
self.assertEqual(unclosed_http_responses, {})
def test_off_end(self):
# Ranged GET that's mostly off the end of the object, but overlaps
# it in just the last byte
status, headers, gotten_obj = self._get_obj(
"bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) + 100))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '1')
self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
self.assertEqual(gotten_obj, self.obj[-1:])
def test_aligned_off_end(self):
# Ranged GET that starts on a segment boundary but asks for a whole lot
status, headers, gotten_obj = self._get_obj(
"bytes=%d-%d" % (8192, len(self.obj) + 100))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '6321')
self.assertEqual(headers['Content-Range'], 'bytes 8192-14512/14513')
self.assertEqual(gotten_obj, self.obj[8192:])
def test_way_off_end(self):
# Ranged GET that's mostly off the end of the object, but overlaps
# it in just the last byte, and wants multiple segments' worth off
# the end
status, headers, gotten_obj = self._get_obj(
"bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) * 1000))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '1')
self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
self.assertEqual(gotten_obj, self.obj[-1:])
def test_boundaries(self):
# Wants the last byte of segment 1 + the first byte of segment 2
status, headers, gotten_obj = self._get_obj("bytes=4095-4096")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '2')
self.assertEqual(headers['Content-Range'], 'bytes 4095-4096/14513')
self.assertEqual(gotten_obj, self.obj[4095:4097])
def test_until_end(self):
# Wants the last byte of segment 1 + the rest
status, headers, gotten_obj = self._get_obj("bytes=4095-")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '10418')
self.assertEqual(headers['Content-Range'], 'bytes 4095-14512/14513')
self.assertEqual(gotten_obj, self.obj[4095:])
def test_small_suffix(self):
# Small range-suffix GET: the last 100 bytes (less than one segment)
status, headers, gotten_obj = self._get_obj("bytes=-100")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '100')
self.assertEqual(headers['Content-Range'], 'bytes 14413-14512/14513')
self.assertEqual(len(gotten_obj), 100)
self.assertEqual(gotten_obj, self.obj[-100:])
def test_small_suffix_aligned(self):
# Small range-suffix GET: the last 100 bytes, last segment is
# full-size
status, headers, gotten_obj = self._get_obj("bytes=-100",
self.aligned_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '100')
self.assertEqual(headers['Content-Range'], 'bytes 8092-8191/8192')
self.assertEqual(len(gotten_obj), 100)
def test_suffix_zero_byte_object(self):
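        # A suffix range on a zero-byte object can't be satisfied as a
        # range, so the whole (empty) object comes back as a plain 200.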
status, headers, gotten_obj = self._get_obj("bytes=-100",
self.zero_byte_obj_name)
self.assertEqual(status, 200)
self.assertEqual(len(gotten_obj), 0)
self.assertEqual(gotten_obj, b"")
def test_suffix_two_segs(self):
# Ask for enough data that we need the last two segments. The last
# segment is short, though, so this ensures we compensate for that.
#
# Note that the total range size is less than one full-size segment.
suffix_len = len(self.obj) % self.seg_size + 1
status, headers, gotten_obj = self._get_obj("bytes=-%d" % suffix_len)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], str(suffix_len))
self.assertEqual(headers['Content-Range'],
'bytes %d-%d/%d' % (len(self.obj) - suffix_len,
len(self.obj) - 1,
len(self.obj)))
self.assertEqual(len(gotten_obj), suffix_len)
def test_large_suffix(self):
# Large range-suffix GET: the last 5000 bytes (more than one segment)
status, headers, gotten_obj = self._get_obj("bytes=-5000")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '5000')
self.assertEqual(headers['Content-Range'], 'bytes 9513-14512/14513')
self.assertEqual(len(gotten_obj), 5000)
self.assertEqual(gotten_obj, self.obj[-5000:])
def test_overlarge_suffix(self):
# The last N+1 bytes of an N-byte object
status, headers, gotten_obj = self._get_obj(
"bytes=-%d" % (len(self.obj) + 1))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '14513')
self.assertEqual(headers['Content-Range'], 'bytes 0-14512/14513')
self.assertEqual(len(gotten_obj), len(self.obj))
self.assertEqual(gotten_obj, self.obj)
def test_small_suffix_tiny_object(self):
status, headers, gotten_obj = self._get_obj(
"bytes=-5", self.tiny_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '5')
self.assertEqual(headers['Content-Range'], 'bytes 12-16/17')
self.assertEqual(gotten_obj, self.tiny_obj[12:])
def test_overlarge_suffix_tiny_object(self):
status, headers, gotten_obj = self._get_obj(
"bytes=-1234567890", self.tiny_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '17')
self.assertEqual(headers['Content-Range'], 'bytes 0-16/17')
self.assertEqual(len(gotten_obj), len(self.tiny_obj))
self.assertEqual(gotten_obj, self.tiny_obj)
def test_multiple_ranges(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,4490-5010", self.obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers["Content-Length"], str(len(gotten_obj)))
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
content_type_params = dict(content_type_params)
self.assertEqual(content_type, 'multipart/byteranges')
boundary = content_type_params.get('boundary')
self.assertIsNotNone(boundary)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
first_byterange, second_byterange = got_byteranges
self.assertEqual(first_byterange['Content-Range'],
'bytes 0-100/14513')
self.assertEqual(first_byterange.get_payload(decode=True),
self.obj[:101])
self.assertEqual(second_byterange['Content-Range'],
'bytes 4490-5010/14513')
self.assertEqual(second_byterange.get_payload(decode=True),
self.obj[4490:5011])
def test_multiple_ranges_overlapping_in_segment(self):
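        # All five small ranges fall within the same EC segment; the
        # response should still contain five separate byteranges.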
status, headers, gotten_obj = self._get_obj(
"bytes=0-9,20-29,40-49,60-69,80-89")
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 5)
def test_multiple_ranges_off_end(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-10,14500-14513") # there is no byte 14513, only 0-14512
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(got_byteranges[0]['Content-Range'],
"bytes 0-10/14513")
self.assertEqual(got_byteranges[1]['Content-Range'],
"bytes 14500-14512/14513")
def test_multiple_ranges_suffix_off_end(self):
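        # The "-13" suffix covers the last 13 bytes of the 14513-byte
        # object, i.e. bytes 14500-14512.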
status, headers, gotten_obj = self._get_obj(
"bytes=0-10,-13")
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(got_byteranges[0]['Content-Range'],
"bytes 0-10/14513")
self.assertEqual(got_byteranges[1]['Content-Range'],
"bytes 14500-14512/14513")
def test_multiple_ranges_one_barely_unsatisfiable(self):
# The thing about 14515-14520 is that it comes from the last segment
# in the object. When we turn this range into a fragment range,
# it'll be for the last fragment, so the object servers see
# something satisfiable.
#
# Basically, we'll get 3 byteranges from the object server, but we
# have to filter out the unsatisfiable one on our own.
status, headers, gotten_obj = self._get_obj(
"bytes=0-10,14515-14520,40-50")
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(got_byteranges[0]['Content-Range'],
"bytes 0-10/14513")
self.assertEqual(got_byteranges[0].get_payload(decode=True),
self.obj[0:11])
self.assertEqual(got_byteranges[1]['Content-Range'],
"bytes 40-50/14513")
self.assertEqual(got_byteranges[1].get_payload(decode=True),
self.obj[40:51])
def test_multiple_ranges_some_unsatisfiable(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,4090-5010,999999-9999999", self.obj_name)
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
content_type_params = dict(content_type_params)
self.assertEqual(content_type, 'multipart/byteranges')
boundary = content_type_params.get('boundary')
self.assertIsNotNone(boundary)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
first_byterange, second_byterange = got_byteranges
self.assertEqual(first_byterange['Content-Range'],
'bytes 0-100/14513')
self.assertEqual(first_byterange.get_payload(decode=True),
self.obj[:101])
self.assertEqual(second_byterange['Content-Range'],
'bytes 4090-5010/14513')
self.assertEqual(second_byterange.get_payload(decode=True),
self.obj[4090:5011])
def test_two_ranges_one_unsatisfiable(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,999999-9999999", self.obj_name)
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
# According to RFC 7233, this could be either a multipart/byteranges
# response with one part or it could be a single-part response (just
# the bytes, no MIME). We're locking it down here: single-part
# response. That's what replicated objects do, and we don't want any
# client-visible differences between EC objects and replicated ones.
self.assertEqual(content_type, 'donuts')
self.assertEqual(gotten_obj, self.obj[:101])
def test_two_ranges_one_unsatisfiable_same_segment(self):
# Like test_two_ranges_one_unsatisfiable(), but where both ranges
# fall within the same EC segment.
status, headers, gotten_obj = self._get_obj(
"bytes=14500-14510,14520-14530")
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
self.assertEqual(content_type, 'donuts')
self.assertEqual(gotten_obj, self.obj[14500:14511])
def test_multiple_ranges_some_unsatisfiable_out_of_order(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,99999998-99999999,4090-5010", self.obj_name)
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
content_type_params = dict(content_type_params)
self.assertEqual(content_type, 'multipart/byteranges')
boundary = content_type_params.get('boundary')
self.assertIsNotNone(boundary)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
first_byterange, second_byterange = got_byteranges
self.assertEqual(first_byterange['Content-Range'],
'bytes 0-100/14513')
self.assertEqual(first_byterange.get_payload(decode=True),
self.obj[:101])
self.assertEqual(second_byterange['Content-Range'],
'bytes 4090-5010/14513')
self.assertEqual(second_byterange.get_payload(decode=True),
self.obj[4090:5011])
@patch_policies([
StoragePolicy(0, 'zero', True, object_ring=FakeRing(base_port=3000)),
StoragePolicy(1, 'one', False, object_ring=FakeRing(base_port=3000)),
StoragePolicy(2, 'two', False, True, object_ring=FakeRing(base_port=3000))
])
class TestContainerController(unittest.TestCase):
"Test swift.proxy_server.ContainerController"
def setUp(self):
self.app = proxy_server.Application(
None,
account_ring=FakeRing(),
container_ring=FakeRing(base_port=2000),
logger=debug_logger())
def test_convert_policy_to_index(self):
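        # Policy-name-to-index translation should be case-insensitive.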
controller = swift.proxy.controllers.ContainerController(self.app,
'a', 'c')
expected = {
'zero': 0,
'ZeRo': 0,
'one': 1,
'OnE': 1,
}
for name, index in expected.items():
req = Request.blank('/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Storage-Policy': name})
self.assertEqual(controller._convert_policy_to_index(req), index)
# default test
req = Request.blank('/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.assertIsNone(controller._convert_policy_to_index(req))
# negative test
req = Request.blank('/a/c',
headers={'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Storage-Policy': 'nada'})
self.assertRaises(HTTPException, controller._convert_policy_to_index,
req)
# storage policy two is deprecated
req = Request.blank('/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Storage-Policy': 'two'})
self.assertRaises(HTTPException, controller._convert_policy_to_index,
req)
def test_convert_index_to_name(self):
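        # The backend's X-Backend-Storage-Policy-Index should surface to
        # the client as the policy name in X-Storage-Policy.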
policy = random.choice(list(POLICIES))
req = Request.blank('/v1/a/c')
with mocked_http_conn(
200, 200,
headers={'X-Backend-Storage-Policy-Index': int(policy)},
) as fake_conn:
resp = req.get_response(self.app)
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Storage-Policy'], policy.name)
def test_no_convert_index_to_name_when_container_not_found(self):
policy = random.choice(list(POLICIES))
req = Request.blank('/v1/a/c')
with mocked_http_conn(
200, 404, 404, 404,
headers={'X-Backend-Storage-Policy-Index':
int(policy)}) as fake_conn:
resp = req.get_response(self.app)
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 404)
self.assertIsNone(resp.headers['X-Storage-Policy'])
def test_error_convert_index_to_name(self):
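        # '-1' is not a valid policy index, so the proxy logs an error and
        # omits X-Storage-Policy from the client response.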
req = Request.blank('/v1/a/c')
with mocked_http_conn(
200, 200,
headers={'X-Backend-Storage-Policy-Index': '-1'}) as fake_conn:
resp = req.get_response(self.app)
with self.assertRaises(StopIteration):
next(fake_conn.code_iter)
self.assertEqual(resp.status_int, 200)
self.assertIsNone(resp.headers['X-Storage-Policy'])
error_lines = self.app.logger.get_lines_for_level('error')
self.assertEqual(2, len(error_lines))
for msg in error_lines:
expected = "Could not translate " \
"X-Backend-Storage-Policy-Index ('-1')"
self.assertIn(expected, msg)
def test_transfer_headers(self):
src_headers = {'x-remove-versions-location': 'x',
'x-container-read': '*:user',
'x-remove-container-sync-key': 'x'}
dst_headers = {'x-versions-location': 'backup'}
controller = swift.proxy.controllers.ContainerController(self.app,
'a', 'c')
controller.transfer_headers(src_headers, dst_headers)
expected_headers = {'x-versions-location': '',
'x-container-read': '*:user',
'x-container-sync-key': ''}
self.assertEqual(dst_headers, expected_headers)
def assert_status_map(self, method, statuses, expected,
raise_exc=False, missing_container=False):
with save_globals():
kwargs = {'missing_container': missing_container}
if raise_exc:
kwargs['raise_exc'] = raise_exc
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a/c/', headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
def test_HEAD_GET(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
def test_status_map(statuses, expected,
c_expected=None, a_expected=None, **kwargs):
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a/c', {})
self.app.update_request(req)
res = controller.HEAD(req)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
infocache = res.environ.get('swift.infocache', {})
if expected < 400:
self.assertIn('x-works', res.headers)
self.assertEqual(res.headers['x-works'], 'yes')
if expected < 300:
self.assertIn('last-modified', res.headers)
self.assertEqual(res.headers['last-modified'], '1')
if c_expected:
self.assertIn('container/a/c', infocache)
self.assertEqual(
infocache['container/a/c']['status'],
c_expected)
else:
self.assertNotIn('container/a/c', infocache)
if a_expected:
self.assertIn('account/a', infocache)
self.assertEqual(infocache['account/a']['status'],
a_expected)
else:
                    self.assertNotIn('account/a', infocache)
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a/c', {})
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
infocache = res.environ.get('swift.infocache', {})
if expected < 400:
self.assertIn('x-works', res.headers)
self.assertEqual(res.headers['x-works'], 'yes')
if expected < 300:
self.assertIn('last-modified', res.headers)
self.assertEqual(res.headers['last-modified'], '1')
if c_expected:
self.assertIn('container/a/c', infocache)
self.assertEqual(
infocache['container/a/c']['status'],
c_expected)
else:
self.assertNotIn('container/a/c', infocache)
if a_expected:
self.assertIn('account/a', infocache)
self.assertEqual(infocache['account/a']['status'],
a_expected)
else:
self.assertNotIn('account/a', infocache)
            # In all the following tests, the account response is cached as
            # 200; what gets cached for the container varies per test
# return 200 and cache 200 for account and container
test_status_map((200, 200, 404, 404), 200, 200, 200)
test_status_map((200, 200, 500, 404), 200, 200, 200)
# return 304 don't cache container
test_status_map((200, 304, 500, 404), 304, None, 200)
# return 404 and cache 404 for container
test_status_map((200, 404, 404, 404), 404, 404, 200)
test_status_map((200, 404, 404, 500), 404, 404, 200)
# return 503, don't cache container
test_status_map((200, 500, 500, 500), 503, None, 200)
self.assertFalse(self.app.account_autocreate)
# return 404 (as account is not found) and don't cache container
test_status_map((404, 404, 404), 404, None, 404)
# cache a 200 for the account because it appears to be created
self.app.account_autocreate = True
test_status_map((404, 404, 404), 404, None, 200)
def test_PUT_policy_headers(self):
backend_requests = []
def capture_requests(ipaddr, port, device, partition, method,
path, headers=None, query_string=None):
if method == 'PUT':
backend_requests.append(headers)
def test_policy(requested_policy):
with save_globals():
mock_conn = set_http_connect(200, 201, 201, 201,
give_connect=capture_requests)
req = Request.blank('/v1/a/test', method='PUT',
headers={'Content-Length': 0})
if requested_policy:
expected_policy = requested_policy
req.headers['X-Storage-Policy'] = policy.name
else:
expected_policy = POLICIES.default
res = req.get_response(self.app)
if expected_policy.is_deprecated:
self.assertEqual(res.status_int, 400)
self.assertEqual(0, len(backend_requests))
expected = b'is deprecated'
self.assertIn(expected, res.body,
'%r did not include %r' % (
res.body, expected))
return
self.assertEqual(res.status_int, 201)
self.assertEqual(
expected_policy.object_ring.replicas,
len(backend_requests))
for headers in backend_requests:
if not requested_policy:
self.assertNotIn('X-Backend-Storage-Policy-Index',
headers)
self.assertIn('X-Backend-Storage-Policy-Default',
headers)
self.assertEqual(
int(expected_policy),
int(headers['X-Backend-Storage-Policy-Default']))
else:
self.assertIn('X-Backend-Storage-Policy-Index',
headers)
self.assertEqual(int(headers
['X-Backend-Storage-Policy-Index']),
int(policy))
# make sure all mocked responses are consumed
with self.assertRaises(StopIteration):
next(mock_conn.code_iter)
test_policy(None) # no policy header
for policy in POLICIES:
backend_requests = [] # reset backend requests
test_policy(policy)
def test_PUT(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a/c', {})
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 201, 201, 201), 201, missing_container=True)
test_status_map((200, 201, 201, 500), 201, missing_container=True)
test_status_map((200, 204, 404, 404), 404, missing_container=True)
test_status_map((200, 204, 500, 404), 503, missing_container=True)
self.assertFalse(self.app.account_autocreate)
test_status_map((404, 404, 404), 404, missing_container=True)
self.app.account_autocreate = True
# fail to retrieve account info
test_status_map(
(503, 503, 503), # account_info fails on 503
503, missing_container=True)
# account fail after creation
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
404, 404, 404), # account_info fail
404, missing_container=True)
test_status_map(
(503, 503, 404, # account_info fails on 404
503, 503, 503, # PUT account
503, 503, 404), # account_info fail
503, missing_container=True)
# put fails
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
503, 503, 201), # put container fail
503, missing_container=True)
# all goes according to plan
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
201, 201, 201), # put container success
201, missing_container=True)
test_status_map(
(503, 404, 404, # account_info fails on 404
503, 201, 201, # PUT account
503, 200, # account_info success
503, 201, 201), # put container success
201, missing_container=True)
def test_PUT_autocreate_account_with_sysmeta(self):
# x-account-sysmeta headers in a container PUT request should be
# transferred to the account autocreate PUT request
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
def test_status_map(statuses, expected, headers=None, **kwargs):
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a/c', {}, headers=headers)
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
self.app.account_autocreate = True
calls = []
callback = _make_callback_func(calls)
key, value = 'X-Account-Sysmeta-Blah', 'something'
headers = {key: value}
# all goes according to plan
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
201, 201, 201), # put container success
201, missing_container=True,
headers=headers,
give_connect=callback)
self.assertEqual(10, len(calls))
for call in calls[3:6]:
self.assertEqual('/account', call['path'])
self.assertIn(key, call['headers'],
'%s call, key %s missing in headers %s' % (
call['method'], key, call['headers']))
self.assertEqual(value, call['headers'][key])
def test_PUT_autocreate_account_utf8(self):
with save_globals():
controller = proxy_server.ContainerController(
self.app, wsgi_to_str('\xe2\x98\x83'),
wsgi_to_str('\xe2\x98\x83'))
def test_status_map(statuses, expected, headers=None, **kwargs):
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a/c', {}, headers=headers)
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
self.app.account_autocreate = True
calls = []
callback = _make_callback_func(calls)
# all goes according to plan
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
201, 201, 201), # put container success
201, missing_container=True,
give_connect=callback)
self.assertEqual(10, len(calls))
for call in calls[3:6]:
self.assertEqual(wsgi_to_str('/\xe2\x98\x83'), call['path'])
def test_POST(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a/c', {})
req.content_length = 0
self.app.update_request(req)
res = controller.POST(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 201, 201, 201), 201, missing_container=True)
test_status_map((200, 201, 201, 500), 201, missing_container=True)
test_status_map((200, 204, 404, 404), 404, missing_container=True)
test_status_map((200, 204, 500, 404), 503, missing_container=True)
self.assertFalse(self.app.account_autocreate)
test_status_map((404, 404, 404), 404, missing_container=True)
self.app.account_autocreate = True
test_status_map((404, 404, 404), 404, missing_container=True)
def test_PUT_max_containers_per_account(self):
with save_globals():
self.app.max_containers_per_account = 12346
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT,
(200, 201, 201, 201), 201,
missing_container=True)
self.app.max_containers_per_account = 12345
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT,
(200, 200, 201, 201, 201), 201,
missing_container=True)
controller = proxy_server.ContainerController(self.app, 'account',
'container_new')
self.assert_status_map(controller.PUT, (200, 404, 404, 404), 403,
missing_container=True)
self.app.max_containers_per_account = 12345
self.app.max_containers_whitelist = ['account']
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT,
(200, 201, 201, 201), 201,
missing_container=True)
def test_PUT_max_container_name_length(self):
limit = constraints.MAX_CONTAINER_NAME_LENGTH
controller = proxy_server.ContainerController(self.app, 'account',
'1' * limit)
self.assert_status_map(controller.PUT, (200, 201, 201, 201), 201,
missing_container=True)
controller = proxy_server.ContainerController(self.app, 'account',
'2' * (limit + 1))
self.assert_status_map(controller.PUT, (), 400,
missing_container=True)
        # internal auto-created accounts get higher limits
limit *= 2
controller = proxy_server.ContainerController(self.app, '.account',
'3' * limit)
self.assert_status_map(controller.PUT, (200, 201, 201, 201), 201,
missing_container=True)
controller = proxy_server.ContainerController(self.app, '.account',
'4' * (limit + 1))
self.assert_status_map(controller.PUT, (), 400,
missing_container=True)
self.app.auto_create_account_prefix = 'acc'
controller = proxy_server.ContainerController(self.app, 'account',
'1' * limit)
self.assert_status_map(controller.PUT, (200, 201, 201, 201), 201,
missing_container=True)
controller = proxy_server.ContainerController(self.app, 'account',
'2' * (limit + 1))
self.assert_status_map(controller.PUT, (), 400,
missing_container=True)
def test_PUT_connect_exceptions(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT, (200, 201, 201, -1), 201,
missing_container=True)
self.assert_status_map(controller.PUT, (200, 201, -1, -1), 503,
missing_container=True)
self.assert_status_map(controller.PUT, (200, 503, 503, -1), 503,
missing_container=True)
def test_acc_missing_returns_404(self):
for meth in ('DELETE', 'PUT'):
with save_globals():
self.app.error_limiter.stats.clear()
controller = proxy_server.ContainerController(self.app,
'account',
'container')
if meth == 'PUT':
set_http_connect(200, 200, 200, 200, 200, 200,
missing_container=True)
else:
set_http_connect(200, 200, 200, 200)
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
self.app.update_request(req)
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 200)
set_http_connect(404, 404, 404, 200, 200, 200)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 404, 404)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 404, raise_exc=True)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
for dev in self.app.account_ring.devs:
set_node_errors(
self.app, dev,
self.app.error_limiter.suppression_limit + 1,
time.time())
set_http_connect(200, 200, 200, 200, 200, 200)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
def test_error_limiting(self):
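        # Errors accumulate per node; once a node exceeds the suppression
        # limit it is skipped, so even all-2xx backend responses yield 503
        # until the suppression interval expires.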
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
container_ring = controller.app.container_ring
controller.app.sort_nodes = lambda l, *args, **kwargs: l
self.assert_status_map(controller.HEAD, (200, 503, 200, 200), 200,
missing_container=False)
self.assertEqual(
node_error_count(controller.app, container_ring.devs[0]), 2)
self.assertTrue(
node_last_error(controller.app, container_ring.devs[0])
is not None)
for _junk in range(self.app.error_limiter.suppression_limit):
self.assert_status_map(controller.HEAD,
(200, 503, 503, 503), 503)
self.assertEqual(
node_error_count(controller.app, container_ring.devs[0]),
self.app.error_limiter.suppression_limit + 1)
self.assert_status_map(controller.HEAD, (200, 200, 200, 200), 503)
self.assertTrue(
node_last_error(controller.app, container_ring.devs[0])
is not None)
self.assert_status_map(controller.PUT, (200, 201, 201, 201), 503,
missing_container=True)
self.assert_status_map(controller.DELETE,
(200, 204, 204, 204), 503)
self.app.error_limiter.suppression_interval = -300
self.assert_status_map(controller.HEAD, (200, 200, 200, 200), 200)
self.assert_status_map(controller.DELETE, (200, 204, 204, 204),
404, raise_exc=True)
def test_DELETE(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.DELETE,
(200, 204, 204, 204), 204)
self.assert_status_map(controller.DELETE,
(200, 204, 204, 503), 204)
self.assert_status_map(controller.DELETE,
(200, 204, 503, 503), 503)
self.assert_status_map(controller.DELETE,
(200, 204, 404, 404), 404)
self.assert_status_map(controller.DELETE,
(200, 404, 404, 404), 404)
self.assert_status_map(controller.DELETE,
(200, 204, 503, 404), 503)
# 200: Account check, 404x3: Container check
self.assert_status_map(controller.DELETE,
(200, 404, 404, 404), 404)
def test_response_get_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c?format=json')
self.app.update_request(req)
res = controller.GET(req)
self.assertIn('accept-ranges', res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_response_head_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c?format=json')
self.app.update_request(req)
res = controller.HEAD(req)
self.assertIn('accept-ranges', res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_PUT_metadata(self):
self.metadata_helper('PUT')
def test_POST_metadata(self):
self.metadata_helper('POST')
def metadata_helper(self, method):
for test_header, test_value in (
('X-Container-Meta-TestHeader', 'TestValue'),
('X-Container-Meta-TestHeader', ''),
('X-Remove-Container-Meta-TestHeader', 'anything'),
('X-Container-Read', '.r:*'),
('X-Remove-Container-Read', 'anything'),
('X-Container-Write', 'anyone'),
('X-Remove-Container-Write', 'anything')):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c':
find_header = test_header
find_value = test_value
if find_header.lower().startswith('x-remove-'):
find_header = \
find_header.lower().replace('-remove', '', 1)
find_value = ''
for k, v in headers.items():
if k.lower() == find_header.lower() and \
v == find_value:
break
else:
test_errors.append('%s: %s not in %s' %
(find_header, find_value, headers))
with save_globals():
controller = \
proxy_server.ContainerController(self.app, 'a', 'c')
set_http_connect(200, 201, 201, 201, give_connect=test_connect)
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': method, 'swift_owner': True},
headers={test_header: test_value})
self.app.update_request(req)
getattr(controller, method)(req)
self.assertEqual(test_errors, [])
def test_PUT_bad_metadata(self):
self.bad_metadata_helper('PUT')
def test_POST_bad_metadata(self):
self.bad_metadata_helper('POST')
def bad_metadata_helper(self, method):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
set_http_connect(200, 201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-' +
('a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-' +
('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-Too-Long':
'a' * constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-Too-Long':
'a' * (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT):
headers['X-Container-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT + 1):
headers['X-Container-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < (constraints.MAX_META_OVERALL_SIZE - 4
- constraints.MAX_META_VALUE_LENGTH):
size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Container-Meta-%04d' % x] = header_value
x += 1
if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Container-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Container-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
def test_POST_calls_clean_acl(self):
called = [False]
def clean_acl(header, value):
called[0] = True
raise ValueError('fake error')
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Container-Read': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
called[0] = False
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Container-Write': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
def test_PUT_calls_clean_acl(self):
called = [False]
def clean_acl(header, value):
called[0] = True
raise ValueError('fake error')
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Container-Read': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.PUT(req)
self.assertTrue(called[0])
called[0] = False
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Container-Write': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.PUT(req)
self.assertTrue(called[0])
def test_GET_no_content(self):
with save_globals():
set_http_connect(200, 204, 204, 204)
controller = proxy_server.ContainerController(self.app, 'a', 'c')
req = Request.blank('/v1/a/c')
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(res.status_int, 204)
ic = res.environ['swift.infocache']
self.assertEqual(ic['container/a/c']['status'], 204)
self.assertEqual(res.content_length, 0)
self.assertNotIn('transfer-encoding', res.headers)
def test_GET_account_non_existent(self):
with save_globals():
set_http_connect(404, 404, 404)
controller = proxy_server.ContainerController(self.app, 'a', 'c')
req = Request.blank('/v1/a/c')
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(res.status_int, 404)
self.assertNotIn('container/a/c', res.environ['swift.infocache'])
def test_GET_auto_create_prefix_account_non_existent(self):
with save_globals():
set_http_connect(404, 404, 404, 204, 204, 204)
controller = proxy_server.ContainerController(self.app, '.a', 'c')
req = Request.blank('/v1/a/c')
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(res.status_int, 204)
ic = res.environ['swift.infocache']
self.assertEqual(ic['container/.a/c']['status'], 204)
self.assertEqual(res.content_length, 0)
self.assertNotIn('transfer-encoding', res.headers)
def test_GET_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'a', 'c')
req = Request.blank('/v1/a/c')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(
res.environ['swift.infocache']['container/a/c']['status'],
201)
self.assertTrue(called[0])
def test_HEAD_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'a', 'c')
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': 'HEAD'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.HEAD(req)
self.assertTrue(called[0])
def test_unauthorized_requests_when_account_not_found(self):
# verify unauthorized container requests always return response
# from swift.authorize
called = [0, 0]
def authorize(req):
called[0] += 1
return HTTPUnauthorized(request=req)
def account_info(*args):
called[1] += 1
return None, None, None
def _do_test(method):
with save_globals():
swift.proxy.controllers.Controller.account_info = account_info
app = proxy_server.Application(None,
account_ring=FakeRing(),
container_ring=FakeRing())
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': method})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = app.handle_request(req)
return res
for method in ('PUT', 'POST', 'DELETE'):
# no delay_denial on method, expect one call to authorize
called = [0, 0]
res = _do_test(method)
self.assertEqual(401, res.status_int)
self.assertEqual([1, 0], called)
for method in ('HEAD', 'GET'):
# delay_denial on method, expect two calls to authorize
called = [0, 0]
res = _do_test(method)
self.assertEqual(401, res.status_int)
self.assertEqual([2, 1], called)
def test_authorized_requests_when_account_not_found(self):
# verify authorized container requests always return 404 when
# account not found
called = [0, 0]
def authorize(req):
called[0] += 1
def account_info(*args):
called[1] += 1
return None, None, None
def _do_test(method):
with save_globals():
swift.proxy.controllers.Controller.account_info = account_info
app = proxy_server.Application(None,
account_ring=FakeRing(),
container_ring=FakeRing())
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': method})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = app.handle_request(req)
return res
for method in ('PUT', 'POST', 'DELETE', 'HEAD', 'GET'):
# expect one call to authorize
called = [0, 0]
res = _do_test(method)
self.assertEqual(404, res.status_int)
self.assertEqual([1, 1], called)
def test_OPTIONS_get_info_drops_origin(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
count = [0]
def my_get_info(app, env, account, container=None,
ret_not_found=False, swift_source=None):
if count[0] > 11:
return {}
count[0] += 1
if not container:
return {'some': 'stuff'}
return proxy_base.was_get_info(
app, env, account, container, ret_not_found, swift_source)
proxy_base.was_get_info = proxy_base.get_info
with mock.patch.object(proxy_base, 'get_info', my_get_info):
proxy_base.get_info = my_get_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
controller.OPTIONS(req)
self.assertLess(count[0], 11)
def test_OPTIONS(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
def my_empty_container_info(*args):
return {}
controller.container_info = my_empty_container_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_empty_origin_container_info(*args):
return {'cors': {'allow_origin': None}}
controller.container_info = my_empty_origin_container_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_container_info(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar:8080 https://foo.bar',
'max_age': '999',
}
}
controller.container_info = my_container_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
'https://foo.bar',
resp.headers['access-control-allow-origin'])
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertIn(verb,
resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
6)
self.assertEqual('999', resp.headers['access-control-max-age'])
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertIn(verb, resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 6)
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.bar',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.bar',
'Access-Control-Request-Method': 'GET'})
controller.app.cors_allow_origin = ['http://foo.bar', ]
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
def my_container_info_wildcard(*args):
return {
'cors': {
'allow_origin': '*',
'max_age': '999',
}
}
controller.container_info = my_container_info_wildcard
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://bar.baz',
'Access-Control-Request-Headers': ' , ,,',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual('*', resp.headers['access-control-allow-origin'])
self.assertNotIn('access-control-allow-headers', resp.headers)
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertIn(verb,
resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
6)
self.assertEqual('999', resp.headers['access-control-max-age'])
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://bar.baz',
'Access-Control-Request-Headers':
'x-foo, x-bar, , x-auth-token',
'Access-Control-Request-Method': 'GET'}
)
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
sortHeaderNames('x-foo, x-bar, x-auth-token'),
sortHeaderNames(resp.headers['access-control-allow-headers']))
self.assertEqual('Access-Control-Request-Headers',
resp.headers.get('vary'))
def test_CORS_valid(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
def stubContainerInfo(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar'
}
}
controller.container_info = stubContainerInfo
def containerGET(controller, req):
return Response(headers={
'X-Container-Meta-Color': 'red',
'X-Super-Secret': 'hush',
})
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(containerGET)(controller, req)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://foo.bar',
resp.headers['access-control-allow-origin'])
self.assertEqual('red', resp.headers['x-container-meta-color'])
# X-Super-Secret is in the response, but not "exposed"
self.assertEqual('hush', resp.headers['x-super-secret'])
self.assertIn('access-control-expose-headers', resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
expected_exposed = set([
'cache-control', 'content-language', 'content-type', 'expires',
'last-modified', 'pragma', 'etag', 'x-timestamp', 'x-trans-id',
'x-openstack-request-id', 'x-container-meta-color'])
self.assertEqual(expected_exposed, exposed)
def _gather_x_account_headers(self, controller_call, req, *connect_args,
**kwargs):
seen_headers = []
to_capture = ('X-Account-Partition', 'X-Account-Host',
'X-Account-Device')
def capture_headers(ipaddr, port, device, partition, method,
path, headers=None, query_string=None):
captured = {}
for header in to_capture:
captured[header] = headers.get(header)
seen_headers.append(captured)
with save_globals():
self.app.allow_account_management = True
set_http_connect(*connect_args, give_connect=capture_headers,
**kwargs)
resp = controller_call(req)
self.assertEqual(2, resp.status_int // 100) # sanity check
# don't care about the account HEAD, so throw away the
# first element
return sorted(seen_headers[1:],
key=lambda d: d['X-Account-Host'] or 'Z')
def test_PUT_x_account_headers_with_fewer_account_replicas(self):
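        # With only two account replicas and three container nodes, the
        # third container PUT gets no X-Account-* headers at all.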
self.app.account_ring.set_replicas(2)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.PUT, req,
200, 201, 201, 201) # HEAD PUT PUT PUT
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000',
'X-Account-Partition': '0',
'X-Account-Device': 'sda'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': None,
'X-Account-Partition': None,
'X-Account-Device': None}
])
def test_PUT_x_account_headers_with_more_account_replicas(self):
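        # With four account replicas and three container nodes, one
        # container PUT carries two account hosts/devices, comma-separated.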
self.app.account_ring.set_replicas(4)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.PUT, req,
200, 201, 201, 201) # HEAD PUT PUT PUT
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Account-Partition': '0',
'X-Account-Device': 'sda,sdd'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': '10.0.0.2:1002',
'X-Account-Partition': '0',
'X-Account-Device': 'sdc'}
])
def test_DELETE_x_account_headers_with_fewer_account_replicas(self):
self.app.account_ring.set_replicas(2)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.DELETE, req,
200, 204, 204, 204) # HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000',
'X-Account-Partition': '0',
'X-Account-Device': 'sda'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': None,
'X-Account-Partition': None,
'X-Account-Device': None}
])
def test_DELETE_x_account_headers_with_more_account_replicas(self):
self.app.account_ring.set_replicas(4)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.DELETE, req,
200, 204, 204, 204) # HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Account-Partition': '0',
'X-Account-Device': 'sda,sdd'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': '10.0.0.2:1002',
'X-Account-Partition': '0',
'X-Account-Device': 'sdc'}
])
def test_PUT_backed_x_timestamp_header(self):
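        # All backend container PUTs should carry the same proxy-generated
        # X-Timestamp value.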
timestamps = []
def capture_timestamps(*args, **kwargs):
headers = kwargs['headers']
timestamps.append(headers.get('X-Timestamp'))
req = Request.blank('/v1/a/c', method='PUT', headers={'': ''})
with save_globals():
new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
# sanity
with self.assertRaises(StopIteration):
next(new_connect.code_iter)
self.assertEqual(2, resp.status_int // 100)
timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
self.assertTrue(re.match(r'[0-9]{10}\.[0-9]{5}', timestamp))
def test_DELETE_backed_x_timestamp_header(self):
timestamps = []
def capture_timestamps(*args, **kwargs):
headers = kwargs['headers']
timestamps.append(headers.get('X-Timestamp'))
req = Request.blank('/v1/a/c', method='DELETE', headers={'': ''})
self.app.update_request(req)
with save_globals():
new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
# sanity
with self.assertRaises(StopIteration):
next(new_connect.code_iter)
self.assertEqual(2, resp.status_int // 100)
timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
self.assertTrue(re.match(r'[0-9]{10}\.[0-9]{5}', timestamp))
def test_node_read_timeout_retry_to_container(self):
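        # node_timeout (0.1s) is much shorter than the simulated 1.0s
        # delays, so reading the response body raises ChunkReadTimeout.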
with save_globals():
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'GET'})
self.app.node_timeout = 0.1
set_http_connect(200, 200, 200, body='abcdef', slow=[1.0, 1.0])
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountController(unittest.TestCase):
def setUp(self):
conf = {'error_suppression_interval': 0}
self.app = proxy_server.Application(conf,
account_ring=FakeRing(),
container_ring=FakeRing())
def assert_status_map(self, method, statuses, expected, env_expected=None,
headers=None, **kwargs):
headers = headers or {}
with save_globals():
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a', {}, headers=headers)
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
infocache = res.environ.get('swift.infocache', {})
if env_expected:
self.assertEqual(infocache['account/a']['status'],
env_expected)
set_http_connect(*statuses)
req = Request.blank('/v1/a/', {})
self.app.update_request(req)
res = method(req)
infocache = res.environ.get('swift.infocache', {})
self.assertEqual(res.status_int, expected)
if env_expected:
self.assertEqual(infocache['account/a']['status'],
env_expected)
def test_OPTIONS(self):
with save_globals():
self.app.allow_account_management = False
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST HEAD'.split():
self.assertIn(verb, resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 4)
# Test a CORS OPTIONS request (i.e. including Origin and
# Access-Control-Request-Method headers)
self.app.allow_account_management = False
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank(
'/v1/account', {'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST HEAD'.split():
self.assertIn(verb, resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 4)
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertIn(verb, resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 6)
def test_GET(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'a')
# GET returns after the first successful call to an Account Server
self.assert_status_map(controller.GET, (200,), 200, 200)
self.assert_status_map(controller.GET, (503, 200), 200, 200)
self.assert_status_map(controller.GET, (503, 503, 200), 200, 200)
self.assert_status_map(controller.GET, (204,), 204, 204)
self.assert_status_map(controller.GET, (503, 204), 204, 204)
self.assert_status_map(controller.GET, (503, 503, 204), 204, 204)
self.assert_status_map(controller.GET, (404, 200), 200, 200)
self.assert_status_map(controller.GET, (404, 404, 200), 200, 200)
self.assert_status_map(controller.GET, (404, 503, 204), 204, 204)
# If account servers fail and autocreate = False, return the
# majority response
self.assert_status_map(controller.GET, (404, 404, 404), 404, 404)
self.assert_status_map(controller.GET, (404, 404, 503), 404, 404)
self.assert_status_map(controller.GET, (404, 503, 503), 503)
self.assert_status_map(controller.GET, (404, 404, 404), 404, 404)
def test_GET_autocreate(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'a')
self.assertFalse(self.app.account_autocreate)
# Repeat the test for autocreate = False and 404 by all
self.assert_status_map(controller.GET,
(404, 404, 404), 404)
self.assert_status_map(controller.GET,
(404, 503, 404), 404)
# When autocreate is True, if none of the nodes responds 2xx
# and a quorum of the nodes responded 404,
# ALL nodes are asked to create the account.
# If successful, the GET request is repeated.
controller.app.account_autocreate = True
expected = 200
self.assert_status_map(controller.GET, (404, 404, 404), expected)
self.assert_status_map(controller.GET, (404, 503, 404), expected)
# We always return 503 if no majority among 4xx, 3xx or 2xx is found
self.assert_status_map(controller.GET,
(500, 500, 400), 503)
def _check_autocreate_listing_with_query_string(self, query_string):
controller = proxy_server.AccountController(self.app, 'a')
controller.app.account_autocreate = True
statuses = (404, 404, 404)
expected = 200
# get the response to check it has json content
with save_globals():
set_http_connect(*statuses)
req = Request.blank('/v1/a' + query_string)
self.app.update_request(req)
res = controller.GET(req)
headers = res.headers
self.assertEqual(
'yes', headers.get('X-Backend-Fake-Account-Listing'))
self.assertEqual(
'application/json; charset=utf-8',
headers.get('Content-Type'))
self.assertEqual([], json.loads(res.body))
self.assertEqual(res.status_int, expected)
def test_auto_create_account_listing_response_is_json(self):
self._check_autocreate_listing_with_query_string('')
self._check_autocreate_listing_with_query_string('?format=plain')
self._check_autocreate_listing_with_query_string('?format=json')
self._check_autocreate_listing_with_query_string('?format=xml')
def test_HEAD(self):
# Same behaviour as GET
with save_globals():
controller = proxy_server.AccountController(self.app, 'a')
self.assert_status_map(controller.HEAD, (200,), 200, 200)
self.assert_status_map(controller.HEAD, (503, 200), 200, 200)
self.assert_status_map(controller.HEAD, (503, 503, 200), 200, 200)
self.assert_status_map(controller.HEAD, (204,), 204, 204)
self.assert_status_map(controller.HEAD, (503, 204), 204, 204)
self.assert_status_map(controller.HEAD, (204, 503, 503), 204, 204)
self.assert_status_map(controller.HEAD, (204,), 204, 204)
self.assert_status_map(controller.HEAD, (404, 404, 404), 404, 404)
self.assert_status_map(controller.HEAD, (404, 404, 200), 200, 200)
self.assert_status_map(controller.HEAD, (404, 200), 200, 200)
self.assert_status_map(controller.HEAD, (404, 404, 503), 404, 404)
self.assert_status_map(controller.HEAD, (404, 503, 503), 503)
self.assert_status_map(controller.HEAD, (404, 503, 204), 204, 204)
def test_HEAD_autocreate(self):
# Same behaviour as GET
with save_globals():
controller = proxy_server.AccountController(self.app, 'a')
self.assertFalse(self.app.account_autocreate)
self.assert_status_map(controller.HEAD,
(404, 404, 404), 404)
controller.app.account_autocreate = True
self.assert_status_map(controller.HEAD,
(404, 404, 404), 200)
self.assert_status_map(controller.HEAD,
(500, 404, 404), 200)
# We always return 503 if no majority among 4xx, 3xx or 2xx is found
self.assert_status_map(controller.HEAD,
(500, 500, 400), 503)
def test_POST_autocreate(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'a')
# first test with autocreate being False
self.assertFalse(self.app.account_autocreate)
self.assert_status_map(controller.POST,
(404, 404, 404), 404)
# next turn it on and test the account being created, then updated
controller.app.account_autocreate = True
self.assert_status_map(
controller.POST,
(404, 404, 404, 202, 202, 202, 201, 201, 201), 201)
# account_info PUT account POST account
self.assert_status_map(
controller.POST,
(404, 404, 503, 201, 201, 503, 204, 204, 504), 204)
# what if create fails
self.assert_status_map(
controller.POST,
(404, 404, 404, 403, 403, 403, 400, 400, 400), 400)
def test_POST_autocreate_with_sysmeta(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'a')
# first test with autocreate being False
self.assertFalse(self.app.account_autocreate)
self.assert_status_map(controller.POST,
(404, 404, 404), 404)
# next turn it on and test the account being created, then updated
controller.app.account_autocreate = True
calls = []
callback = _make_callback_func(calls)
key, value = 'X-Account-Sysmeta-Blah', 'something'
headers = {key: value}
self.assert_status_map(
controller.POST,
(404, 404, 404, 202, 202, 202, 201, 201, 201), 201,
# POST, autocreate PUT, POST again
headers=headers,
give_connect=callback)
self.assertEqual(9, len(calls))
for call in calls:
self.assertIn(key, call['headers'],
'%s call, key %s missing in headers %s' %
(call['method'], key, call['headers']))
self.assertEqual(value, call['headers'][key])
def test_connection_refused(self):
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1 # can't connect on this port
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 503)
def test_other_socket_error(self):
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = -1 # invalid port number
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 503)
def test_response_get_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/a?format=json')
self.app.update_request(req)
res = controller.GET(req)
self.assertIn('accept-ranges', res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_response_head_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/a?format=json')
self.app.update_request(req)
res = controller.HEAD(req)
res.body
self.assertIn('accept-ranges', res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_PUT(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a', {})
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
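# account PUT is rejected with 405 until allow_account_management
# is enabled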
test_status_map((201, 201, 201), 405)
self.app.allow_account_management = True
test_status_map((201, 201, 201), 201)
test_status_map((201, 201, 500), 201)
test_status_map((201, 500, 500), 503)
test_status_map((204, 500, 404), 503)
def test_PUT_max_account_name_length(self):
self.app.allow_account_management = True
limit = constraints.MAX_ACCOUNT_NAME_LENGTH
controller = proxy_server.AccountController(self.app, '1' * limit)
self.assert_status_map(controller.PUT, (201, 201, 201), 201)
controller = proxy_server.AccountController(
self.app, '2' * (limit + 1))
self.assert_status_map(controller.PUT, (), 400)
# internal auto-created accounts get higher limits
limit *= 2
controller = proxy_server.AccountController(
self.app, '.' + '3' * (limit - 1))
self.assert_status_map(controller.PUT, (201, 201, 201), 201)
controller = proxy_server.AccountController(
self.app, '.' + '4' * limit)
self.assert_status_map(controller.PUT, (), 400)
self.app.auto_create_account_prefix = 'FOO_'
limit //= 2
controller = proxy_server.AccountController(
self.app, '.' + '5' * (limit - 1))
self.assert_status_map(controller.PUT, (201, 201, 201), 201)
controller = proxy_server.AccountController(
self.app, '.' + '6' * limit)
self.assert_status_map(controller.PUT, (), 400)
limit *= 2
controller = proxy_server.AccountController(
self.app, 'FOO_' + '7' * (limit - 4))
self.assert_status_map(controller.PUT, (201, 201, 201), 201)
controller = proxy_server.AccountController(
self.app, 'FOO_' + '8' * (limit - 3))
self.assert_status_map(controller.PUT, (), 400)
def test_PUT_connect_exceptions(self):
with save_globals():
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'account')
self.assert_status_map(controller.PUT, (201, 201, -1), 201)
self.assert_status_map(controller.PUT, (201, -1, -1), 503)
self.assert_status_map(controller.PUT, (503, 503, -1), 503)
def test_PUT_status(self):
with save_globals():
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'account')
self.assert_status_map(controller.PUT, (201, 201, 202), 202)
def test_PUT_metadata(self):
self.metadata_helper('PUT')
def test_POST_metadata(self):
self.metadata_helper('POST')
def metadata_helper(self, method):
for test_header, test_value in (
('X-Account-Meta-TestHeader', 'TestValue'),
('X-Account-Meta-TestHeader', ''),
('X-Remove-Account-Meta-TestHeader', 'anything')):
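# the X-Remove-* variant should be forwarded to the account server
# as the plain header with an empty value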
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a':
find_header = test_header
find_value = test_value
if find_header.lower().startswith('x-remove-'):
find_header = \
find_header.lower().replace('-remove', '', 1)
find_value = ''
for k, v in headers.items():
if k.lower() == find_header.lower() and \
v == find_value:
break
else:
test_errors.append('%s: %s not in %s' %
(find_header, find_value, headers))
with save_globals():
self.app.allow_account_management = True
controller = \
proxy_server.AccountController(self.app, 'a')
set_http_connect(201, 201, 201, give_connect=test_connect)
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': method},
headers={test_header: test_value})
self.app.update_request(req)
getattr(controller, method)(req)
self.assertEqual(test_errors, [])
def test_PUT_bad_metadata(self):
self.bad_metadata_helper('PUT')
def test_POST_bad_metadata(self):
self.bad_metadata_helper('POST')
def bad_metadata_helper(self, method):
with save_globals():
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'a')
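# each metadata constraint is exercised at its limit (expect 201)
# and just over it (expect 400)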
set_http_connect(200, 201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-' +
('a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-' +
('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-Too-Long':
'a' * constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-Too-Long':
'a' * (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT):
headers['X-Account-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT + 1):
headers['X-Account-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < (constraints.MAX_META_OVERALL_SIZE - 4
- constraints.MAX_META_VALUE_LENGTH):
size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Account-Meta-%04d' % x] = header_value
x += 1
if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Account-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Account-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
def test_DELETE(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a', {'REQUEST_METHOD': 'DELETE'})
req.content_length = 0
self.app.update_request(req)
res = controller.DELETE(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
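# account DELETE is rejected with 405 until allow_account_management
# is enabled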
test_status_map((201, 201, 201), 405)
self.app.allow_account_management = True
test_status_map((201, 201, 201), 201)
test_status_map((201, 201, 500), 201)
test_status_map((201, 500, 500), 503)
test_status_map((204, 500, 404), 503)
def test_DELETE_with_query_string(self):
# Extra safety in case someone typos a query string for an
# account-level DELETE request that was really meant to be caught by
# some middleware.
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a?whoops',
environ={'REQUEST_METHOD': 'DELETE'})
req.content_length = 0
self.app.update_request(req)
res = controller.DELETE(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
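# a query string on an account-level DELETE is always rejected with
# 400, even when account management is enabled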
test_status_map((201, 201, 201), 400)
self.app.allow_account_management = True
test_status_map((201, 201, 201), 400)
test_status_map((201, 201, 500), 400)
test_status_map((201, 500, 500), 400)
test_status_map((204, 500, 404), 400)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountControllerFakeGetResponse(unittest.TestCase):
"""
Test all the faked-out GET responses for accounts that don't exist. They
have to match the responses for empty accounts that really exist.
"""
def setUp(self):
conf = {'account_autocreate': 'yes'}
self.app = listing_formats.ListingFilter(
proxy_server.Application(conf,
account_ring=FakeRing(),
container_ring=FakeRing()),
{})
def test_GET_autocreate_accept_json(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank(
'/v1/a', headers={'Accept': 'application/json'},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('application/json; charset=utf-8',
resp.headers['Content-Type'])
self.assertEqual(b"[]", resp.body)
def test_GET_autocreate_format_json(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a?format=json',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a',
'QUERY_STRING': 'format=json'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('application/json; charset=utf-8',
resp.headers['Content-Type'])
self.assertEqual(b"[]", resp.body)
def test_GET_autocreate_accept_xml(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a', headers={"Accept": "text/xml"},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('text/xml; charset=utf-8',
resp.headers['Content-Type'])
empty_xml_listing = (b'<?xml version="1.0" encoding="UTF-8"?>\n'
b'<account name="a">\n</account>')
self.assertEqual(empty_xml_listing, resp.body)
def test_GET_autocreate_format_xml(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a?format=xml',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a',
'QUERY_STRING': 'format=xml'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('application/xml; charset=utf-8',
resp.headers['Content-Type'])
empty_xml_listing = (b'<?xml version="1.0" encoding="UTF-8"?>\n'
b'<account name="a">\n</account>')
self.assertEqual(empty_xml_listing, resp.body)
def test_GET_autocreate_accept_unknown(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a', headers={"Accept": "mystery/meat"},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(406, resp.status_int)
def test_GET_autocreate_bad_accept(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a', headers={"Accept": "a/b;q=nope"},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(400, resp.status_int)
self.assertEqual(b'Invalid Accept header', resp.body)
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a', headers={"Accept": "a/b;q=0.5;q=1"},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(400, resp.status_int)
self.assertEqual(b'Invalid Accept header', resp.body)
def test_GET_autocreate_format_invalid_utf8(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a?format=\xff\xfe',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a',
'QUERY_STRING': 'format=\xff\xfe'})
resp = req.get_response(self.app)
self.assertEqual(400, resp.status_int)
def test_GET_autocreate_utf8(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/\xe2\x98\x83',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/\xe2\x98\x83'})
resp = req.get_response(self.app)
self.assertEqual(204, resp.status_int)
def test_account_acl_header_access(self):
acl = {
'admin': ['AUTH_alice'],
'read-write': ['AUTH_bob'],
'read-only': ['AUTH_carol'],
}
prefix = get_sys_meta_prefix('account')
privileged_headers = {(prefix + 'core-access-control'): format_acl(
version=2, acl_dict=acl)}
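# account ACLs live in privileged sysmeta; the proxy should expose
# them only on swift_owner requests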
app = proxy_server.Application(
None, account_ring=FakeRing(),
container_ring=FakeRing())
with save_globals():
# Mock account server will provide privileged information (ACLs)
set_http_connect(200, 200, 200, headers=privileged_headers)
req = Request.blank('/v1/a', environ={'REQUEST_METHOD': 'GET'})
resp = app.handle_request(req)
# Not a swift_owner -- ACLs should NOT be in response
header = 'X-Account-Access-Control'
self.assertNotIn(header, resp.headers, '%r was in %r' % (
header, resp.headers))
# Same setup -- mock acct server will provide ACLs
set_http_connect(200, 200, 200, headers=privileged_headers)
req = Request.blank('/v1/a', environ={'REQUEST_METHOD': 'GET',
'swift_owner': True})
resp = app.handle_request(req)
# For a swift_owner, the ACLs *should* be in response
self.assertIn(header, resp.headers, '%r not in %r' % (
header, resp.headers))
def test_account_acls_through_delegation(self):
# Define a way to grab the requests sent out from the AccountController
# to the Account Server, and a way to inject responses we'd like the
# Account Server to return.
resps_to_send = []
@contextmanager
def patch_account_controller_method(verb):
old_method = getattr(proxy_server.AccountController, verb)
new_method = lambda self, req, *_, **__: resps_to_send.pop(0)
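# stub out the verb handler so it pops canned responses instead of
# contacting any backend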
try:
setattr(proxy_server.AccountController, verb, new_method)
yield
finally:
setattr(proxy_server.AccountController, verb, old_method)
def make_test_request(http_method, swift_owner=True):
env = {
'REQUEST_METHOD': http_method,
'swift_owner': swift_owner,
}
acl = {
'admin': ['foo'],
'read-write': ['bar'],
'read-only': ['bas'],
}
headers = {} if http_method in ('GET', 'HEAD') else {
'x-account-access-control': format_acl(version=2, acl_dict=acl)
}
return Request.blank('/v1/a', environ=env, headers=headers)
# Our AccountController will invoke methods to communicate with the
# Account Server, and they will return responses like these:
def make_canned_response(http_method):
acl = {
'admin': ['foo'],
'read-write': ['bar'],
'read-only': ['bas'],
}
headers = {'x-account-sysmeta-core-access-control': format_acl(
version=2, acl_dict=acl)}
canned_resp = Response(headers=headers)
canned_resp.environ = {
'PATH_INFO': '/acct',
'REQUEST_METHOD': http_method,
}
resps_to_send.append(canned_resp)
app = proxy_server.Application(
None, account_ring=FakeRing(),
container_ring=FakeRing())
app.allow_account_management = True
ext_header = 'x-account-access-control'
with patch_account_controller_method('GETorHEAD_base'):
# GET/HEAD requests should remap sysmeta headers from acct server
for verb in ('GET', 'HEAD'):
make_canned_response(verb)
req = make_test_request(verb)
resp = app.handle_request(req)
h = parse_acl(version=2, data=resp.headers.get(ext_header))
self.assertEqual(h['admin'], ['foo'])
self.assertEqual(h['read-write'], ['bar'])
self.assertEqual(h['read-only'], ['bas'])
# swift_owner = False: GET/HEAD shouldn't return sensitive info
make_canned_response(verb)
req = make_test_request(verb, swift_owner=False)
resp = app.handle_request(req)
h = resp.headers
self.assertIsNone(h.get(ext_header))
# swift_owner unset: GET/HEAD shouldn't return sensitive info
make_canned_response(verb)
req = make_test_request(verb, swift_owner=False)
del req.environ['swift_owner']
resp = app.handle_request(req)
h = resp.headers
self.assertIsNone(h.get(ext_header))
# Verify that PUT/POST requests remap sysmeta headers from acct server
with patch_account_controller_method('make_requests'):
make_canned_response('PUT')
req = make_test_request('PUT')
resp = app.handle_request(req)
h = parse_acl(version=2, data=resp.headers.get(ext_header))
self.assertEqual(h['admin'], ['foo'])
self.assertEqual(h['read-write'], ['bar'])
self.assertEqual(h['read-only'], ['bas'])
make_canned_response('POST')
req = make_test_request('POST')
resp = app.handle_request(req)
h = parse_acl(version=2, data=resp.headers.get(ext_header))
self.assertEqual(h['admin'], ['foo'])
self.assertEqual(h['read-write'], ['bar'])
self.assertEqual(h['read-only'], ['bas'])
class TestProxyObjectPerformance(unittest.TestCase):
def setUp(self):
# This is just a simple test that can be used to verify and debug the
# various data paths between the proxy server and the object
# server. Used as a playground to debug buffer sizes for sockets.
skip_if_no_xattrs()
_test_servers[0].error_limiter.stats.clear() # clear out errors
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# Client is transmitting in 2 MB chunks
fd = sock.makefile('rwb', 2 * 1024 * 1024)
# Small, fast for testing
obj_len = 2 * 64 * 1024
# Use 1 GB or more for measurements
# obj_len = 2 * 512 * 1024 * 1024
self.path = '/v1/a/c/o.large'
fd.write(('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (self.path, str(obj_len))).encode('ascii'))
fd.write(b'a' * obj_len)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
self.obj_len = obj_len
def test_GET_debug_large_file(self):
for i in range(10):
start = time.time()
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# Client is reading in 2 MB chunks
fd = sock.makefile('rwb', 2 * 1024 * 1024)
fd.write(('GET %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.path).encode('ascii'))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
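# drain the response body in 100 KB reads and confirm the full
# object length arrived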
total = 0
while True:
buf = fd.read(100000)
if not buf:
break
total += len(buf)
self.assertEqual(total, self.obj_len)
end = time.time()
print("Run %02d took %07.03f" % (i, end - start))
@patch_policies([StoragePolicy(0, 'migrated', object_ring=FakeRing()),
StoragePolicy(1, 'ernie', True, object_ring=FakeRing()),
StoragePolicy(2, 'deprecated', is_deprecated=True,
object_ring=FakeRing()),
StoragePolicy(3, 'bert', object_ring=FakeRing())])
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
registry._swift_info = {}
registry._swift_admin_info = {}
def test_registered_defaults(self):
app = proxy_server.Application({},
account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/info')
resp = req.get_response(app)
si = json.loads(resp.body)['swift']
self.assertIn('version', si)
self.assertEqual(si['max_file_size'], constraints.MAX_FILE_SIZE)
self.assertEqual(si['max_meta_name_length'],
constraints.MAX_META_NAME_LENGTH)
self.assertEqual(si['max_meta_value_length'],
constraints.MAX_META_VALUE_LENGTH)
self.assertEqual(si['max_meta_count'], constraints.MAX_META_COUNT)
self.assertEqual(si['max_header_size'], constraints.MAX_HEADER_SIZE)
self.assertEqual(si['max_meta_overall_size'],
constraints.MAX_META_OVERALL_SIZE)
self.assertEqual(si['account_listing_limit'],
constraints.ACCOUNT_LISTING_LIMIT)
self.assertEqual(si['container_listing_limit'],
constraints.CONTAINER_LISTING_LIMIT)
self.assertEqual(si['max_account_name_length'],
constraints.MAX_ACCOUNT_NAME_LENGTH)
self.assertEqual(si['max_container_name_length'],
constraints.MAX_CONTAINER_NAME_LENGTH)
self.assertEqual(si['max_object_name_length'],
constraints.MAX_OBJECT_NAME_LENGTH)
self.assertIn('strict_cors_mode', si)
self.assertFalse(si['allow_account_management'])
self.assertFalse(si['account_autocreate'])
# this next test is deliberately brittle in order to alert if
# other items are added to swift info
self.assertEqual(len(si), 17)
si = registry.get_swift_info()['swift']
# These settings are excluded by default via disallowed_sections
self.assertEqual(si['valid_api_versions'],
constraints.VALID_API_VERSIONS)
self.assertEqual(si['auto_create_account_prefix'],
constraints.AUTO_CREATE_ACCOUNT_PREFIX)
self.assertIn('policies', si)
sorted_pols = sorted(si['policies'], key=operator.itemgetter('name'))
self.assertEqual(len(sorted_pols), 3)
for policy in sorted_pols:
self.assertNotEqual(policy['name'], 'deprecated')
self.assertEqual(sorted_pols[0]['name'], 'bert')
self.assertEqual(sorted_pols[1]['name'], 'ernie')
self.assertEqual(sorted_pols[2]['name'], 'migrated')
class TestSocketObjectVersions(unittest.TestCase):
def setUp(self):
global _test_sockets
skip_if_no_xattrs()
_test_servers[0].error_limiter.stats.clear() # clear out errors
self.prolis = prolis = listen_zero()
self._orig_prolis = _test_sockets[0]
allowed_headers = ', '.join([
'content-encoding',
'x-object-manifest',
'content-disposition',
'foo'
])
conf = {'devices': _testdir, 'swift_dir': _testdir,
'mount_check': 'false', 'allowed_headers': allowed_headers}
prosrv = listing_formats.ListingFilter(
copy.ServerSideCopyMiddleware(
versioned_writes.VersionedWritesMiddleware(
proxy_logging.ProxyLoggingMiddleware(
_test_servers[0], conf,
logger=_test_servers[0].logger), {}),
{}
),
{}, logger=_test_servers[0].logger
)
self.coro = spawn(wsgi.server, prolis, prosrv, NullLogger(),
protocol=SwiftHttpProtocol)
# point the global proxy listener at the server spawned above so
# requests are handled by the versioned_writes-filtered middleware stack
self.sockets = list(_test_sockets)
self.sockets[0] = prolis
_test_sockets = tuple(self.sockets)
def tearDown(self):
self.coro.kill()
# put the global state back
global _test_sockets
self.sockets[0] = self._orig_prolis
_test_sockets = tuple(self.sockets)
_test_servers[0].error_limiter.stats.clear() # clear out errors
def test_version_manifest(self, oc=b'versions', vc=b'vers', o=b'name'):
versions_to_create = 3
# Create a container for our versioned object testing
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis, obj4lis, obj5lis, obj6lis) = _test_sockets
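# versioned_writes archives copies as <3-hex-digit name length><name>/<timestamp>;
# pre and presub are those length prefixes, used below when listing saved versions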
pre = quote('%03x' % len(o)).encode('ascii')
osub = b'%s/sub' % o
presub = quote('%03x' % len(osub)).encode('ascii')
osub = quote(osub).encode('ascii')
presub = quote(presub).encode('ascii')
oc = quote(oc).encode('ascii')
vc = quote(vc).encode('ascii')
def put_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Storage-Token: t\r\n'
b'Content-Length: 0\r\nX-Versions-Location: %s\r\n\r\n'
% (oc, vc))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
headers = put_container()
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
def get_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\n'
b'X-Storage-Token: t\r\n\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
# check that the header was set
headers, body = get_container()
exp = b'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
self.assertIn(b'X-Versions-Location: %s' % vc, headers)
def put_version_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Storage-Token: t\r\n'
b'Content-Length: 0\r\n\r\n' % vc)
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
# make the container for the object versions
headers = put_version_container()
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
def put(version):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\nX-Storage-Token: t'
b'\r\nContent-Length: 5\r\nContent-Type: text/jibberish%d'
b'\r\n\r\n%05d\r\n' % (oc, o, version, version))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
def get(container=oc, obj=o):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/%s/%s HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n'
b'\r\n' % (container, obj))
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
# Create the versioned file
headers = put(0)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Create the object versions
for version in range(1, versions_to_create):
sleep(.01) # guarantee that the timestamp changes
headers = put(version)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Ensure retrieving the manifest file gets the latest version
headers, body = get()
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn(b'Content-Type: text/jibberish%d' % version, headers)
self.assertNotIn(b'X-Object-Meta-Foo: barbaz', headers)
self.assertEqual(body, b'%05d' % version)
def get_version_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\n'
b'X-Storage-Token: t\r\n\r\n' % vc)
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
# Ensure we have the right number of versions saved
headers, body = get_version_container()
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
versions = [x for x in body.split(b'\n') if x]
self.assertEqual(len(versions), versions_to_create - 1)
def delete():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r'
b'\nConnection: close\r\nX-Storage-Token: t\r\n\r\n'
% (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
def copy():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'COPY /v1/a/%s/%s HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\nX-Auth-Token: '
b't\r\nDestination: %s/copied_name\r\n'
b'Content-Length: 0\r\n\r\n' % (oc, o, oc))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
# copy a version and make sure the version info is stripped
headers = copy()
exp = b'HTTP/1.1 2' # 2xx series response to the COPY
self.assertEqual(headers[:len(exp)], exp)
def get_copy():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/%s/copied_name HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\n'
b'X-Auth-Token: t\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
headers, body = get_copy()
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertEqual(body, b'%05d' % version)
def post():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'POST /v1/a/%s/%s HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\nX-Auth-Token: '
b't\r\nContent-Type: foo/bar\r\nContent-Length: 0\r\n'
b'X-Object-Meta-Bar: foo\r\n\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
# post and make sure it's updated
headers = post()
exp = b'HTTP/1.1 2' # 2xx series response to the POST
self.assertEqual(headers[:len(exp)], exp)
headers, body = get()
self.assertIn(b'Content-Type: foo/bar', headers)
self.assertIn(b'X-Object-Meta-Bar: foo', headers)
self.assertEqual(body, b'%05d' % version)
# check container listing
headers, body = get_container()
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
# Delete the object versions
for segment in range(versions_to_create - 1, 0, -1):
headers = delete()
exp = b'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
# Ensure retrieving the manifest file gets the latest version
headers, body = get()
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn(b'Content-Type: text/jibberish%d' % (segment - 1),
headers)
self.assertEqual(body, b'%05d' % (segment - 1))
# Ensure we have the right number of versions saved
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r'
b'\n' % (vc, pre, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
versions = [x for x in body.split(b'\n') if x]
self.assertEqual(len(versions), segment - 1)
# there is now one version left (in the manifest)
# Ensure we have no saved versions
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n'
% (vc, pre, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 204 No Content'
self.assertEqual(headers[:len(exp)], exp)
# delete the last version
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
# Ensure it's all gone
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/%s/%s HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n'
% (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
# make sure manifest files are also versioned
for _junk in range(0, versions_to_create):
sleep(.01) # guarantee that the timestamp changes
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\nX-Storage-Token: '
b't\r\nContent-Length: 0\r\n'
b'Content-Type: text/jibberish0\r\n'
b'Foo: barbaz\r\nX-Object-Manifest: %s/%s/\r\n\r\n'
% (oc, o, oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nhost: '
b'localhost\r\nconnection: close\r\nx-auth-token: t\r\n\r\n'
% (vc, pre, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200 OK'
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
versions = [x for x in body.split(b'\n') if x]
self.assertEqual(versions_to_create - 1, len(versions))
# DELETE v1/a/c/obj shouldn't delete v1/a/c/obj/sub versions
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\nX-Storage-Token: '
b't\r\nContent-Length: 5\r\nContent-Type: text/jibberish0\r\n'
b'Foo: barbaz\r\n\r\n00000\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\nX-Storage-Token: '
b't\r\nContent-Length: 5\r\nContent-Type: text/jibberish0\r\n'
b'Foo: barbaz\r\n\r\n00001\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\nX-Storage-Token: '
b't\r\nContent-Length: 4\r\nContent-Type: text/jibberish0\r\n'
b'Foo: barbaz\r\n\r\nsub1\r\n' % (oc, osub))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\nX-Storage-Token: '
b't\r\nContent-Length: 4\r\nContent-Type: text/jibberish0\r\n'
b'Foo: barbaz\r\n\r\nsub2\r\n' % (oc, osub))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n'
% (vc, presub, osub))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
versions = [x for x in body.split(b'\n') if x]
self.assertEqual(len(versions), 1)
# Check for when the versions target container doesn't exist
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/%swhoops HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Storage-Token: t\r\n'
b'Content-Length: 0\r\n'
b'X-Versions-Location: none\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Create the versioned file
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/%swhoops/foo HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\nX-Storage-Token: '
b't\r\nContent-Length: 5\r\n\r\n00000\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Create another version
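# archiving the current version must fail because the
# X-Versions-Location container ('none') was never created, so expect 412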
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/%swhoops/foo HTTP/1.1\r\nHost: '
b'localhost\r\nConnection: close\r\nX-Storage-Token: '
b't\r\nContent-Length: 5\r\n\r\n00001\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
# Delete the object
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'DELETE /v1/a/%swhoops/foo HTTP/1.1\r\nHost: localhost\r\n'
b'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 2' # 2xx response
self.assertEqual(headers[:len(exp)], exp)
def test_version_manifest_utf8(self):
oc = b'0_oc_non_ascii\xc2\xa3'
vc = b'0_vc_non_ascii\xc2\xa3'
o = b'0_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_container(self):
oc = b'1_oc_non_ascii\xc2\xa3'
vc = b'1_vc_ascii'
o = b'1_o_ascii'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_version_container(self):
oc = b'2_oc_ascii'
vc = b'2_vc_non_ascii\xc2\xa3'
o = b'2_o_ascii'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_containers(self):
oc = b'3_oc_non_ascii\xc2\xa3'
vc = b'3_vc_non_ascii\xc2\xa3'
o = b'3_o_ascii'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_object(self):
oc = b'4_oc_ascii'
vc = b'4_vc_ascii'
o = b'4_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_version_container_utf_object(self):
oc = b'5_oc_ascii'
vc = b'5_vc_non_ascii\xc2\xa3'
o = b'5_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_container_utf_object(self):
oc = b'6_oc_non_ascii\xc2\xa3'
vc = b'6_vc_ascii'
o = b'6_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/proxy/test_server.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves.urllib.parse import quote
import unittest
import os
from tempfile import mkdtemp
import shutil
from swift.common.middleware.copy import ServerSideCopyMiddleware
from swift.common.storage_policy import StoragePolicy
from swift.common.swob import Request
from swift.common.utils import mkdirs, split_path
from swift.common.wsgi import WSGIContext
from swift.obj import server as object_server
from swift.proxy import server as proxy
import swift.proxy.controllers
from swift.proxy.controllers.base import get_object_info
from test.debug_logger import debug_logger
from test.unit import FakeMemcache, FakeRing, fake_http_connect, \
patch_policies, skip_if_no_xattrs
class FakeServerConnection(WSGIContext):
'''Fakes an HTTPConnection to a server instance.'''
def __init__(self, app):
super(FakeServerConnection, self).__init__(app)
self.data = b''
def getheaders(self):
return self._response_headers
def read(self, amt=None):
try:
return next(self.resp_iter)
except StopIteration:
return b''
def getheader(self, name, default=None):
result = self._response_header_value(name)
return result if result else default
def getresponse(self):
environ = {'REQUEST_METHOD': self.method}
req = Request.blank(self.path, environ, headers=self.req_headers,
body=self.data)
self.data = b''
self.resp = self._app_call(req.environ)
self.resp_iter = iter(self.resp)
if self._response_headers is None:
self._response_headers = []
status_parts = self._response_status.split(' ', 1)
self.status = int(status_parts[0])
self.reason = status_parts[1] if len(status_parts) == 2 else ''
return self
def getexpect(self):
class ContinueResponse(object):
status = 100
return ContinueResponse()
def send(self, data):
self.data += data
def close(self):
pass
def __call__(self, ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
self.path = quote('/' + device + '/' + str(partition) + path)
self.method = method
self.req_headers = headers
return self
def get_http_connect(account_func, container_func, object_func):
'''Returns an http_connect function that delegates to
entity-specific http_connect methods based on request path.
'''
def http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
a, c, o = split_path(path, 1, 3, True)
if o:
func = object_func
elif c:
func = container_func
else:
func = account_func
resp = func(ipaddr, port, device, partition, method, path,
headers=headers, query_string=query_string)
return resp
return http_connect
@patch_policies([StoragePolicy(0, 'zero', True,
object_ring=FakeRing(replicas=1))])
class TestObjectSysmeta(unittest.TestCase):
'''Tests that object sysmeta is correctly handled by the combination
of proxy server and object server.
'''
def _assertStatus(self, resp, expected):
self.assertEqual(resp.status_int, expected,
'Expected %d, got %s'
% (expected, resp.status))
def _assertInHeaders(self, resp, expected):
for key, val in expected.items():
self.assertIn(key, resp.headers,
'Header %s missing from %s' % (key, resp.headers))
self.assertEqual(val, resp.headers[key],
'Expected header %s:%s, got %s:%s'
% (key, val, key, resp.headers[key]))
def _assertNotInHeaders(self, resp, unexpected):
for key, val in unexpected.items():
self.assertNotIn(key, resp.headers,
'Header %s not expected in %s'
% (key, resp.headers))
def setUp(self):
skip_if_no_xattrs()
self.app = proxy.Application(None,
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(replicas=1),
container_ring=FakeRing(replicas=1))
self.copy_app = ServerSideCopyMiddleware(self.app, {})
self.tmpdir = mkdtemp()
self.testdir = os.path.join(self.tmpdir,
'tmp_test_object_server_ObjectController')
mkdirs(os.path.join(self.testdir, 'sda', 'tmp'))
conf = {'devices': self.testdir, 'mount_check': 'false'}
self.obj_ctlr = object_server.ObjectController(
conf, logger=debug_logger('obj-ut'))
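# object requests are served by the real ObjectController above;
# account and container requests get canned 200 responses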
http_connect = get_http_connect(fake_http_connect(200),
fake_http_connect(200),
FakeServerConnection(self.obj_ctlr))
self.orig_base_http_connect = swift.proxy.controllers.base.http_connect
self.orig_obj_http_connect = swift.proxy.controllers.obj.http_connect
swift.proxy.controllers.base.http_connect = http_connect
swift.proxy.controllers.obj.http_connect = http_connect
def tearDown(self):
shutil.rmtree(self.tmpdir)
swift.proxy.controllers.base.http_connect = self.orig_base_http_connect
swift.proxy.controllers.obj.http_connect = self.orig_obj_http_connect
original_sysmeta_headers_1 = {'x-object-sysmeta-test0': 'val0',
'x-object-sysmeta-test1': 'val1'}
original_sysmeta_headers_2 = {'x-object-sysmeta-test2': 'val2'}
changed_sysmeta_headers = {'x-object-sysmeta-test0': '',
'x-object-sysmeta-test1': 'val1 changed'}
new_sysmeta_headers = {'x-object-sysmeta-test3': 'val3'}
original_meta_headers_1 = {'x-object-meta-test0': 'meta0',
'x-object-meta-test1': 'meta1'}
original_meta_headers_2 = {'x-object-meta-test2': 'meta2'}
changed_meta_headers = {'x-object-meta-test0': '',
'x-object-meta-test1': 'meta1 changed'}
new_meta_headers = {'x-object-meta-test3': 'meta3'}
bad_headers = {'x-account-sysmeta-test1': 'bad1'}
# these transient_sysmeta headers get changed...
original_transient_sysmeta_headers_1 = \
{'x-object-transient-sysmeta-testA': 'A'}
# these transient_sysmeta headers get deleted...
original_transient_sysmeta_headers_2 = \
{'x-object-transient-sysmeta-testB': 'B'}
# these are replacement transient_sysmeta headers
changed_transient_sysmeta_headers = \
{'x-object-transient-sysmeta-testA': 'changed_A'}
new_transient_sysmeta_headers_1 = {'x-object-transient-sysmeta-testC': 'C'}
new_transient_sysmeta_headers_2 = {'x-object-transient-sysmeta-testD': 'D'}
def test_PUT_sysmeta_then_GET(self):
path = '/v1/a/c/o'
env = {'REQUEST_METHOD': 'PUT'}
hdrs = dict(self.original_sysmeta_headers_1)
hdrs.update(self.original_meta_headers_1)
hdrs.update(self.bad_headers)
hdrs.update(self.original_transient_sysmeta_headers_1)
req = Request.blank(path, environ=env, headers=hdrs, body=b'x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
req = Request.blank(path, environ={})
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.original_sysmeta_headers_1)
self._assertInHeaders(resp, self.original_meta_headers_1)
self._assertInHeaders(resp, self.original_transient_sysmeta_headers_1)
self._assertNotInHeaders(resp, self.bad_headers)
def test_PUT_sysmeta_then_HEAD(self):
path = '/v1/a/c/o'
env = {'REQUEST_METHOD': 'PUT'}
hdrs = dict(self.original_sysmeta_headers_1)
hdrs.update(self.original_meta_headers_1)
hdrs.update(self.bad_headers)
hdrs.update(self.original_transient_sysmeta_headers_1)
req = Request.blank(path, environ=env, headers=hdrs, body=b'x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
env = {'REQUEST_METHOD': 'HEAD'}
req = Request.blank(path, environ=env)
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.original_sysmeta_headers_1)
self._assertInHeaders(resp, self.original_meta_headers_1)
self._assertInHeaders(resp, self.original_transient_sysmeta_headers_1)
self._assertNotInHeaders(resp, self.bad_headers)
def test_sysmeta_replaced_by_PUT(self):
path = '/v1/a/c/o'
cache = FakeMemcache()
env = {'REQUEST_METHOD': 'PUT', 'swift.cache': cache}
hdrs = dict(self.original_sysmeta_headers_1)
hdrs.update(self.original_sysmeta_headers_2)
hdrs.update(self.original_meta_headers_1)
hdrs.update(self.original_meta_headers_2)
req = Request.blank(path, environ=env, headers=hdrs, body=b'x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
env = {'REQUEST_METHOD': 'PUT', 'swift.cache': cache}
hdrs = dict(self.changed_sysmeta_headers)
hdrs.update(self.new_sysmeta_headers)
hdrs.update(self.changed_meta_headers)
hdrs.update(self.new_meta_headers)
hdrs.update(self.bad_headers)
req = Request.blank(path, environ=env, headers=hdrs, body=b'x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
req = Request.blank(path, environ={'swift.cache': cache})
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.changed_sysmeta_headers)
self._assertInHeaders(resp, self.new_sysmeta_headers)
self._assertNotInHeaders(resp, self.original_sysmeta_headers_2)
self._assertInHeaders(resp, self.changed_meta_headers)
self._assertInHeaders(resp, self.new_meta_headers)
self._assertNotInHeaders(resp, self.original_meta_headers_2)
def test_sysmeta_not_updated_by_POST(self):
# check sysmeta is not changed by a POST but user meta is replaced
path = '/v1/a/c/o'
cache = FakeMemcache()
env = {'REQUEST_METHOD': 'PUT', 'swift.cache': cache}
hdrs = dict(self.original_sysmeta_headers_1)
hdrs.update(self.original_meta_headers_1)
req = Request.blank(path, environ=env, headers=hdrs, body=b'x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
env = {'REQUEST_METHOD': 'POST', 'swift.cache': cache}
hdrs = dict(self.changed_sysmeta_headers)
hdrs.update(self.new_sysmeta_headers)
hdrs.update(self.changed_meta_headers)
hdrs.update(self.new_meta_headers)
hdrs.update(self.bad_headers)
req = Request.blank(path, environ=env, headers=hdrs)
resp = req.get_response(self.app)
self._assertStatus(resp, 202)
req = Request.blank(path, environ={'swift.cache': cache})
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.original_sysmeta_headers_1)
self._assertNotInHeaders(resp, self.new_sysmeta_headers)
self._assertInHeaders(resp, self.changed_meta_headers)
self._assertInHeaders(resp, self.new_meta_headers)
self._assertNotInHeaders(resp, self.bad_headers)
env = {'REQUEST_METHOD': 'PUT', 'swift.cache': cache}
hdrs = dict(self.changed_sysmeta_headers)
hdrs.update(self.new_sysmeta_headers)
hdrs.update(self.bad_headers)
req = Request.blank(path, environ=env, headers=hdrs, body=b'x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
req = Request.blank(path, environ={'swift.cache': cache})
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.changed_sysmeta_headers)
self._assertInHeaders(resp, self.new_sysmeta_headers)
self._assertNotInHeaders(resp, self.original_sysmeta_headers_2)
def test_sysmeta_updated_by_COPY(self):
# check sysmeta is updated by a COPY in the same way as user meta by
# issuing requests to the copy middleware app
path = '/v1/a/c/o'
dest = '/c/o2'
cache = FakeMemcache()
env = {'REQUEST_METHOD': 'PUT', 'swift.cache': cache}
hdrs = dict(self.original_sysmeta_headers_1)
hdrs.update(self.original_sysmeta_headers_2)
hdrs.update(self.original_meta_headers_1)
hdrs.update(self.original_meta_headers_2)
hdrs.update(self.original_transient_sysmeta_headers_1)
hdrs.update(self.original_transient_sysmeta_headers_2)
req = Request.blank(path, environ=env, headers=hdrs, body=b'x')
resp = req.get_response(self.copy_app)
self._assertStatus(resp, 201)
env = {'REQUEST_METHOD': 'COPY', 'swift.cache': cache}
hdrs = dict(self.changed_sysmeta_headers)
hdrs.update(self.new_sysmeta_headers)
hdrs.update(self.changed_meta_headers)
hdrs.update(self.new_meta_headers)
hdrs.update(self.changed_transient_sysmeta_headers)
hdrs.update(self.new_transient_sysmeta_headers_1)
hdrs.update(self.bad_headers)
hdrs.update({'Destination': dest})
req = Request.blank(path, environ=env, headers=hdrs)
resp = req.get_response(self.copy_app)
self._assertStatus(resp, 201)
self._assertInHeaders(resp, self.changed_sysmeta_headers)
self._assertInHeaders(resp, self.new_sysmeta_headers)
self._assertInHeaders(resp, self.original_sysmeta_headers_2)
self._assertInHeaders(resp, self.changed_meta_headers)
self._assertInHeaders(resp, self.new_meta_headers)
self._assertInHeaders(resp, self.original_meta_headers_2)
self._assertInHeaders(resp, self.changed_transient_sysmeta_headers)
self._assertInHeaders(resp, self.new_transient_sysmeta_headers_1)
self._assertInHeaders(resp, self.original_transient_sysmeta_headers_2)
self._assertNotInHeaders(resp, self.bad_headers)
req = Request.blank('/v1/a/c/o2', environ={'swift.cache': cache})
resp = req.get_response(self.copy_app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.changed_sysmeta_headers)
self._assertInHeaders(resp, self.new_sysmeta_headers)
self._assertInHeaders(resp, self.original_sysmeta_headers_2)
self._assertInHeaders(resp, self.changed_meta_headers)
self._assertInHeaders(resp, self.new_meta_headers)
self._assertInHeaders(resp, self.original_meta_headers_2)
self._assertInHeaders(resp, self.changed_transient_sysmeta_headers)
self._assertInHeaders(resp, self.new_transient_sysmeta_headers_1)
self._assertInHeaders(resp, self.original_transient_sysmeta_headers_2)
self._assertNotInHeaders(resp, self.bad_headers)
def test_sysmeta_updated_by_COPY_from(self):
# check sysmeta is updated by a PUT with x-copy-from in the same way as
# user meta by issuing requests to the copy middleware app
path = '/v1/a/c/o'
cache = FakeMemcache()
env = {'REQUEST_METHOD': 'PUT', 'swift.cache': cache}
hdrs = dict(self.original_sysmeta_headers_1)
hdrs.update(self.original_sysmeta_headers_2)
hdrs.update(self.original_meta_headers_1)
hdrs.update(self.original_meta_headers_2)
req = Request.blank(path, environ=env, headers=hdrs, body=b'x')
resp = req.get_response(self.copy_app)
self._assertStatus(resp, 201)
env = {'REQUEST_METHOD': 'PUT', 'swift.cache': cache}
hdrs = dict(self.changed_sysmeta_headers)
hdrs.update(self.new_sysmeta_headers)
hdrs.update(self.changed_meta_headers)
hdrs.update(self.new_meta_headers)
hdrs.update(self.bad_headers)
hdrs.update({'X-Copy-From': '/c/o'})
req = Request.blank('/v1/a/c/o2', environ=env, headers=hdrs, body=b'')
resp = req.get_response(self.copy_app)
self._assertStatus(resp, 201)
self._assertInHeaders(resp, self.changed_sysmeta_headers)
self._assertInHeaders(resp, self.new_sysmeta_headers)
self._assertInHeaders(resp, self.original_sysmeta_headers_2)
self._assertInHeaders(resp, self.changed_meta_headers)
self._assertInHeaders(resp, self.new_meta_headers)
self._assertInHeaders(resp, self.original_meta_headers_2)
self._assertNotInHeaders(resp, self.bad_headers)
req = Request.blank('/v1/a/c/o2', environ={'swift.cache': cache})
resp = req.get_response(self.copy_app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.changed_sysmeta_headers)
self._assertInHeaders(resp, self.new_sysmeta_headers)
self._assertInHeaders(resp, self.original_sysmeta_headers_2)
self._assertInHeaders(resp, self.changed_meta_headers)
self._assertInHeaders(resp, self.new_meta_headers)
self._assertInHeaders(resp, self.original_meta_headers_2)
self._assertNotInHeaders(resp, self.bad_headers)
def test_transient_sysmeta_replaced_by_PUT_or_POST(self):
        # check transient_sysmeta is replaced en masse by a POST or PUT
path = '/v1/a/c/o'
cache = FakeMemcache()
env = {'REQUEST_METHOD': 'PUT', 'swift.cache': cache}
hdrs = dict(self.original_transient_sysmeta_headers_1)
hdrs.update(self.original_transient_sysmeta_headers_2)
hdrs.update(self.original_meta_headers_1)
req = Request.blank(path, environ=env, headers=hdrs, body=b'x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
req = Request.blank(path, environ={})
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.original_transient_sysmeta_headers_1)
self._assertInHeaders(resp, self.original_transient_sysmeta_headers_2)
self._assertInHeaders(resp, self.original_meta_headers_1)
info = get_object_info(req.environ, self.app)
self.assertEqual(2, len(info.get('transient_sysmeta', ())))
self.assertEqual({'testa': 'A', 'testb': 'B'},
info['transient_sysmeta'])
# POST will replace all existing transient_sysmeta and usermeta values
env = {'REQUEST_METHOD': 'POST', 'swift.cache': cache}
hdrs = dict(self.changed_transient_sysmeta_headers)
hdrs.update(self.new_transient_sysmeta_headers_1)
req = Request.blank(path, environ=env, headers=hdrs)
resp = req.get_response(self.app)
self._assertStatus(resp, 202)
req = Request.blank(path, environ={})
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.changed_transient_sysmeta_headers)
self._assertInHeaders(resp, self.new_transient_sysmeta_headers_1)
self._assertNotInHeaders(resp, self.original_meta_headers_1)
self._assertNotInHeaders(resp,
self.original_transient_sysmeta_headers_2)
info = get_object_info(req.environ, self.app)
self.assertEqual(2, len(info.get('transient_sysmeta', ())))
self.assertEqual({'testa': 'changed_A', 'testc': 'C'},
info['transient_sysmeta'])
# subsequent PUT replaces all transient_sysmeta and usermeta values
env = {'REQUEST_METHOD': 'PUT', 'swift.cache': cache}
hdrs = dict(self.new_transient_sysmeta_headers_2)
hdrs.update(self.original_meta_headers_2)
req = Request.blank(path, environ=env, headers=hdrs, body=b'x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
req = Request.blank(path, environ={'swift.cache': cache})
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.original_meta_headers_2)
self._assertInHeaders(resp, self.new_transient_sysmeta_headers_2)
# meta from previous POST should have gone away...
self._assertNotInHeaders(resp, self.changed_transient_sysmeta_headers)
self._assertNotInHeaders(resp, self.new_transient_sysmeta_headers_1)
# sanity check that meta from first PUT did not re-appear...
self._assertNotInHeaders(resp, self.original_meta_headers_1)
self._assertNotInHeaders(resp,
self.original_transient_sysmeta_headers_1)
self._assertNotInHeaders(resp,
self.original_transient_sysmeta_headers_2)
info = get_object_info(req.environ, self.app)
self.assertEqual(1, len(info.get('transient_sysmeta', ())))
self.assertEqual({'testd': 'D'}, info['transient_sysmeta'])
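    # A hedged illustration (not part of the original assertions): the
    # 'transient_sysmeta' dicts checked above are derived from request
    # headers. Assuming the header fixtures defined earlier in this class
    # follow the usual X-Object-Transient-Sysmeta-<key> naming, a header such
    # as 'X-Object-Transient-Sysmeta-Testd: D' is what surfaces through
    # get_object_info() as info['transient_sysmeta'] == {'testd': 'D'}.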
| swift-master | test/unit/proxy/test_sysmeta.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import socket
import unittest
from eventlet import Timeout
import six
from six.moves import urllib
from swift.common.constraints import CONTAINER_LISTING_LIMIT
from swift.common.swob import Request, bytes_to_wsgi, str_to_wsgi, wsgi_quote
from swift.common.utils import ShardRange, Timestamp, Namespace, \
NamespaceBoundList
from swift.proxy import server as proxy_server
from swift.proxy.controllers.base import headers_to_container_info, \
Controller, get_container_info, get_cache_key
from test import annotate_failure
from test.unit import fake_http_connect, FakeRing, FakeMemcache, \
make_timestamp_iter
from swift.common.storage_policy import StoragePolicy
from swift.common.request_helpers import get_sys_meta_prefix
from test.debug_logger import debug_logger
from test.unit import patch_policies, mocked_http_conn
from test.unit.common.ring.test_ring import TestRingBase
from test.unit.proxy.test_server import node_error_count
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestContainerController(TestRingBase):
CONTAINER_REPLICAS = 3
def setUp(self):
TestRingBase.setUp(self)
self.logger = debug_logger()
self.container_ring = FakeRing(replicas=self.CONTAINER_REPLICAS,
max_more_nodes=9)
self.app = proxy_server.Application(None,
logger=self.logger,
account_ring=FakeRing(),
container_ring=self.container_ring)
self.account_info = {
'status': 200,
'container_count': '10',
'total_object_count': '100',
'bytes': '1000',
'meta': {},
'sysmeta': {},
}
class FakeAccountInfoContainerController(
proxy_server.ContainerController):
def account_info(controller, *args, **kwargs):
patch_path = 'swift.proxy.controllers.base.get_account_info'
with mock.patch(patch_path) as mock_get_info:
mock_get_info.return_value = dict(self.account_info)
return super(FakeAccountInfoContainerController,
controller).account_info(
*args, **kwargs)
_orig_get_controller = self.app.get_controller
def wrapped_get_controller(*args, **kwargs):
with mock.patch('swift.proxy.server.ContainerController',
new=FakeAccountInfoContainerController):
return _orig_get_controller(*args, **kwargs)
self.app.get_controller = wrapped_get_controller
self.ts_iter = make_timestamp_iter()
def _make_callback_func(self, context):
def callback(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
context['method'] = method
context['path'] = path
context['headers'] = headers or {}
return callback
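    # Usage sketch (illustrative only, mirroring test_sys_meta_headers_PUT
    # further down): the callback returned above is handed to
    # fake_http_connect(..., give_connect=callback) so that every backend
    # connection records its method, path and headers into ``context``:
    #
    #   context = {}
    #   callback = self._make_callback_func(context)
    #   with mock.patch('swift.proxy.controllers.base.http_connect',
    #                   fake_http_connect(200, 200, give_connect=callback)):
    #       controller.PUT(req)
    #   self.assertIn(sys_meta_key, context['headers'])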
def _assert_responses(self, method, test_cases):
controller = proxy_server.ContainerController(self.app, 'a', 'c')
for responses, expected in test_cases:
with mock.patch(
'swift.proxy.controllers.base.http_connect',
fake_http_connect(*responses)):
cache = FakeMemcache()
cache.set(get_cache_key('a'), {'status': 204})
req = Request.blank('/v1/a/c', environ={'swift.cache': cache})
resp = getattr(controller, method)(req)
self.assertEqual(expected,
resp.status_int,
'Expected %s but got %s. Failed case: %s' %
(expected, resp.status_int, str(responses)))
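    # Shape sketch (illustrative): each entry in ``test_cases`` pairs a tuple
    # of backend status codes with the expected proxy status, e.g.
    #   [((201, 201, 404), 201), ((404, 404, 503), 404)]
    # exactly as used by the PUT/DELETE/POST response-code tests below.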
def test_container_info_got_cached(self):
memcache = FakeMemcache()
controller = proxy_server.ContainerController(self.app, 'a', 'c')
with mocked_http_conn(200, 200) as mock_conn:
req = Request.blank('/v1/a/c', {'swift.cache': memcache})
resp = controller.HEAD(req)
self.assertEqual(2, resp.status_int // 100)
self.assertEqual(['/a', '/a/c'],
# requests are like /sdX/0/..
[r['path'][6:] for r in mock_conn.requests])
# Make sure it's in both swift.infocache and memcache
header_info = headers_to_container_info(resp.headers)
info_cache = resp.environ['swift.infocache']
self.assertIn("container/a/c", resp.environ['swift.infocache'])
self.assertEqual(header_info, info_cache['container/a/c'])
self.assertEqual(header_info, memcache.get('container/a/c'))
# The failure doesn't lead to cache eviction
errors = [500] * self.CONTAINER_REPLICAS * 2
with mocked_http_conn(*errors) as mock_conn:
req = Request.blank('/v1/a/c', {'swift.infocache': info_cache,
'swift.cache': memcache})
resp = controller.HEAD(req)
self.assertEqual(5, resp.status_int // 100)
self.assertEqual(['/a/c'] * self.CONTAINER_REPLICAS * 2,
# requests are like /sdX/0/..
[r['path'][6:] for r in mock_conn.requests])
self.assertIs(info_cache, resp.environ['swift.infocache'])
self.assertIn("container/a/c", resp.environ['swift.infocache'])
# NB: this is the *old* header_info, from the good req
self.assertEqual(header_info, info_cache['container/a/c'])
self.assertEqual(header_info, memcache.get('container/a/c'))
@mock.patch('swift.proxy.controllers.container.clear_info_cache')
@mock.patch.object(Controller, 'make_requests')
def test_container_cache_cleared_after_PUT(
self, mock_make_requests, mock_clear_info_cache):
parent_mock = mock.Mock()
parent_mock.attach_mock(mock_make_requests, 'make_requests')
parent_mock.attach_mock(mock_clear_info_cache, 'clear_info_cache')
controller = proxy_server.ContainerController(self.app, 'a', 'c')
callback = self._make_callback_func({})
req = Request.blank('/v1/a/c')
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, give_connect=callback)):
controller.PUT(req)
# Ensure cache is cleared after the PUT request
self.assertEqual(parent_mock.mock_calls[0][0], 'make_requests')
self.assertEqual(parent_mock.mock_calls[1][0], 'clear_info_cache')
def test_swift_owner(self):
owner_headers = {
'x-container-read': 'value', 'x-container-write': 'value',
'x-container-sync-key': 'value', 'x-container-sync-to': 'value'}
controller = proxy_server.ContainerController(self.app, 'a', 'c')
req = Request.blank('/v1/a/c')
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, headers=owner_headers)):
resp = controller.HEAD(req)
self.assertEqual(2, resp.status_int // 100)
for key in owner_headers:
self.assertNotIn(key, resp.headers)
req = Request.blank('/v1/a/c', environ={'swift_owner': True})
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, headers=owner_headers)):
resp = controller.HEAD(req)
self.assertEqual(2, resp.status_int // 100)
for key in owner_headers:
self.assertIn(key, resp.headers)
def test_reseller_admin(self):
reseller_internal_headers = {
get_sys_meta_prefix('container') + 'sharding': 'True'}
reseller_external_headers = {'x-container-sharding': 'on'}
controller = proxy_server.ContainerController(self.app, 'a', 'c')
# Normal users, even swift owners, can't set it
req = Request.blank('/v1/a/c', method='PUT',
headers=reseller_external_headers,
environ={'swift_owner': True})
with mocked_http_conn(*[201] * self.CONTAINER_REPLICAS) as mock_conn:
resp = req.get_response(self.app)
self.assertEqual(2, resp.status_int // 100)
for key in reseller_internal_headers:
for captured in mock_conn.requests:
self.assertNotIn(key.title(), captured['headers'])
req = Request.blank('/v1/a/c', method='POST',
headers=reseller_external_headers,
environ={'swift_owner': True})
with mocked_http_conn(*[204] * self.CONTAINER_REPLICAS) as mock_conn:
resp = req.get_response(self.app)
self.assertEqual(2, resp.status_int // 100)
for key in reseller_internal_headers:
for captured in mock_conn.requests:
self.assertNotIn(key.title(), captured['headers'])
req = Request.blank('/v1/a/c', environ={'swift_owner': True})
# Heck, they don't even get to know
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200,
headers=reseller_internal_headers)):
resp = controller.HEAD(req)
self.assertEqual(2, resp.status_int // 100)
for key in reseller_external_headers:
self.assertNotIn(key, resp.headers)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200,
headers=reseller_internal_headers)):
resp = controller.GET(req)
self.assertEqual(2, resp.status_int // 100)
for key in reseller_external_headers:
self.assertNotIn(key, resp.headers)
# But reseller admins can set it
req = Request.blank('/v1/a/c', method='PUT',
headers=reseller_external_headers,
environ={'reseller_request': True})
with mocked_http_conn(*[201] * self.CONTAINER_REPLICAS) as mock_conn:
resp = req.get_response(self.app)
self.assertEqual(2, resp.status_int // 100)
for key in reseller_internal_headers:
for captured in mock_conn.requests:
self.assertIn(key.title(), captured['headers'])
req = Request.blank('/v1/a/c', method='POST',
headers=reseller_external_headers,
environ={'reseller_request': True})
with mocked_http_conn(*[204] * self.CONTAINER_REPLICAS) as mock_conn:
resp = req.get_response(self.app)
self.assertEqual(2, resp.status_int // 100)
for key in reseller_internal_headers:
for captured in mock_conn.requests:
self.assertIn(key.title(), captured['headers'])
# And see that they have
req = Request.blank('/v1/a/c', environ={'reseller_request': True})
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200,
headers=reseller_internal_headers)):
resp = controller.HEAD(req)
self.assertEqual(2, resp.status_int // 100)
for key in reseller_external_headers:
self.assertIn(key, resp.headers)
self.assertEqual(resp.headers[key], 'True')
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200,
headers=reseller_internal_headers)):
resp = controller.GET(req)
self.assertEqual(2, resp.status_int // 100)
for key in reseller_external_headers:
self.assertEqual(resp.headers[key], 'True')
def test_sys_meta_headers_PUT(self):
# check that headers in sys meta namespace make it through
# the container controller
sys_meta_key = '%stest' % get_sys_meta_prefix('container')
sys_meta_key = sys_meta_key.title()
user_meta_key = 'X-Container-Meta-Test'
controller = proxy_server.ContainerController(self.app, 'a', 'c')
context = {}
callback = self._make_callback_func(context)
hdrs_in = {sys_meta_key: 'foo',
user_meta_key: 'bar',
'x-timestamp': '1.0'}
req = Request.blank('/v1/a/c', headers=hdrs_in)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, give_connect=callback)):
controller.PUT(req)
self.assertEqual(context['method'], 'PUT')
self.assertIn(sys_meta_key, context['headers'])
self.assertEqual(context['headers'][sys_meta_key], 'foo')
self.assertIn(user_meta_key, context['headers'])
self.assertEqual(context['headers'][user_meta_key], 'bar')
self.assertNotEqual(context['headers']['x-timestamp'], '1.0')
def test_sys_meta_headers_POST(self):
# check that headers in sys meta namespace make it through
# the container controller
sys_meta_key = '%stest' % get_sys_meta_prefix('container')
sys_meta_key = sys_meta_key.title()
user_meta_key = 'X-Container-Meta-Test'
controller = proxy_server.ContainerController(self.app, 'a', 'c')
context = {}
callback = self._make_callback_func(context)
hdrs_in = {sys_meta_key: 'foo',
user_meta_key: 'bar',
'x-timestamp': '1.0'}
req = Request.blank('/v1/a/c', headers=hdrs_in)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, give_connect=callback)):
controller.POST(req)
self.assertEqual(context['method'], 'POST')
self.assertIn(sys_meta_key, context['headers'])
self.assertEqual(context['headers'][sys_meta_key], 'foo')
self.assertIn(user_meta_key, context['headers'])
self.assertEqual(context['headers'][user_meta_key], 'bar')
self.assertNotEqual(context['headers']['x-timestamp'], '1.0')
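        # (Illustrative note): get_sys_meta_prefix('container') yields the
        # usual 'x-container-sysmeta-' prefix, so the sys_meta_key exercised
        # here is 'X-Container-Sysmeta-Test' -- a namespace that internal
        # middleware, rather than end users, is expected to set.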
def test_node_errors(self):
self.app.sort_nodes = lambda n, *args, **kwargs: n
for method in ('PUT', 'DELETE', 'POST'):
def test_status_map(statuses, expected):
self.app.error_limiter.stats.clear()
req = Request.blank('/v1/a/c', method=method)
with mocked_http_conn(*statuses) as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, expected)
for req in fake_conn.requests:
self.assertEqual(req['method'], method)
self.assertTrue(req['path'].endswith('/a/c'))
base_status = [201] * self.CONTAINER_REPLICAS
# test happy path
test_status_map(list(base_status), 201)
for i in range(self.CONTAINER_REPLICAS):
self.assertEqual(node_error_count(
self.app, self.container_ring.devs[i]), 0)
# single node errors and test isolation
for i in range(self.CONTAINER_REPLICAS):
test_status_map(base_status[:i] + [503] + base_status[i:], 201)
for j in range(self.CONTAINER_REPLICAS):
expected = 1 if j == i else 0
self.assertEqual(node_error_count(
self.app, self.container_ring.devs[j]), expected)
# timeout
test_status_map(base_status[:1] + [Timeout()] + base_status[1:],
201)
self.assertEqual(node_error_count(
self.app, self.container_ring.devs[1]), 1)
# exception
test_status_map([Exception('kaboom!')] + base_status, 201)
self.assertEqual(node_error_count(
self.app, self.container_ring.devs[0]), 1)
# insufficient storage
test_status_map(base_status[:2] + [507] + base_status[2:], 201)
self.assertEqual(node_error_count(
self.app, self.container_ring.devs[2]),
self.app.error_limiter.suppression_limit + 1)
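            # (Illustrative note, not an assertion): a 507 is treated as
            # "insufficient storage" and error-limits the node immediately,
            # which is why its error count jumps straight to
            # suppression_limit + 1 rather than incrementing by one as in the
            # 503/Timeout/Exception cases above.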
def test_response_codes_for_GET(self):
nodes = self.app.container_ring.replicas
handoffs = self.app.request_node_count(nodes) - nodes
GET_TEST_CASES = [
([socket.error()] * (nodes + handoffs), 503),
([500] * (nodes + handoffs), 503),
([200], 200),
([404, 200], 200),
([404] * nodes + [200], 200),
([Timeout()] * nodes + [404] * handoffs, 503),
([Timeout()] * (nodes + handoffs), 503),
([Timeout()] * (nodes + handoffs - 1) + [404], 503),
([Timeout()] * (nodes - 1) + [404] * (handoffs + 1), 503),
([Timeout()] * (nodes - 2) + [404] * (handoffs + 2), 404),
([500] * (nodes - 1) + [404] * (handoffs + 1), 503),
([503, 200], 200),
([507, 200], 200),
]
failures = []
for case, expected in GET_TEST_CASES:
try:
with mocked_http_conn(*case):
req = Request.blank('/v1/a/c')
resp = req.get_response(self.app)
try:
self.assertEqual(resp.status_int, expected)
except AssertionError:
msg = '%r => %s (expected %s)' % (
case, resp.status_int, expected)
failures.append(msg)
except AssertionError as e:
# left over status failure
msg = '%r => %s' % (case, e)
failures.append(msg)
if failures:
self.fail('Some requests did not have expected response:\n' +
'\n'.join(failures))
# One more test, simulating all nodes being error-limited
class FakeIter(object):
num_primary_nodes = 3
def __iter__(self):
return iter([])
with mocked_http_conn(), mock.patch(
'swift.proxy.controllers.container.NodeIter',
return_value=FakeIter()):
req = Request.blank('/v1/a/c')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_handoff_has_deleted_database(self):
nodes = self.app.container_ring.replicas
handoffs = self.app.request_node_count(nodes) - nodes
status = [Timeout()] * nodes + [404] * handoffs
timestamps = tuple([None] * nodes + ['1'] + [None] * (handoffs - 1))
with mocked_http_conn(*status, timestamps=timestamps):
req = Request.blank('/v1/a/c')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_response_code_for_PUT(self):
PUT_TEST_CASES = [
((201, 201, 201), 201),
((201, 201, 404), 201),
((201, 201, 503), 201),
((201, 404, 404), 404),
((201, 404, 503), 503),
((201, 503, 503), 503),
((404, 404, 404), 404),
((404, 404, 503), 404),
((404, 503, 503), 503),
((503, 503, 503), 503)
]
self._assert_responses('PUT', PUT_TEST_CASES)
def test_response_code_for_DELETE(self):
DELETE_TEST_CASES = [
((204, 204, 204), 204),
((204, 204, 404), 204),
((204, 204, 503), 204),
((204, 404, 404), 404),
((204, 404, 503), 503),
((204, 503, 503), 503),
((404, 404, 404), 404),
((404, 404, 503), 404),
((404, 503, 503), 503),
((503, 503, 503), 503)
]
self._assert_responses('DELETE', DELETE_TEST_CASES)
def test_response_code_for_POST(self):
POST_TEST_CASES = [
((204, 204, 204), 204),
((204, 204, 404), 204),
((204, 204, 503), 204),
((204, 404, 404), 404),
((204, 404, 503), 503),
((204, 503, 503), 503),
((404, 404, 404), 404),
((404, 404, 503), 404),
((404, 503, 503), 503),
((503, 503, 503), 503)
]
self._assert_responses('POST', POST_TEST_CASES)
def _make_shard_objects(self, shard_range):
if six.PY2:
lower = ord(shard_range.lower.decode('utf8')[0]
if shard_range.lower else '@')
upper = ord(shard_range.upper.decode('utf8')[0]
if shard_range.upper else u'\U0001ffff')
else:
lower = ord(shard_range.lower[0] if shard_range.lower else '@')
upper = ord(shard_range.upper[0] if shard_range.upper
else '\U0001ffff')
objects = [{'name': six.unichr(i), 'bytes': i,
'hash': 'hash%s' % six.unichr(i),
'content_type': 'text/plain', 'deleted': 0,
'last_modified': next(self.ts_iter).isoformat}
for i in range(lower + 1, upper + 1)][:1024]
return objects
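    # Illustrative example (a sketch of what the helper above produces, not
    # exercised directly): for a shard range bounded by ('', 'ham') the lower
    # bound falls back to '@', so the generated listing holds one object per
    # code point from 'A' up to and including 'h', each with 'bytes' equal to
    # its ordinal, capped at 1024 entries.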
def _check_GET_shard_listing(self, mock_responses, expected_objects,
expected_requests, query_string='',
reverse=False, expected_status=200,
memcache=False):
# mock_responses is a list of tuples (status, json body, headers)
        # expected_objects is a list of dicts
        # expected_requests is a list of tuples (path, hdrs dict, params dict)
        # sanity check that expected_objects is name-ordered with no repeats
def name(obj):
return obj.get('name', obj.get('subdir'))
for (prev, next_) in zip(expected_objects, expected_objects[1:]):
if reverse:
self.assertGreater(name(prev), name(next_))
else:
self.assertLess(name(prev), name(next_))
container_path = '/v1/a/c' + query_string
codes = (resp[0] for resp in mock_responses)
bodies = iter([json.dumps(resp[1]).encode('ascii')
for resp in mock_responses])
exp_headers = [resp[2] for resp in mock_responses]
request = Request.blank(container_path)
if memcache:
            # memcache exists, which causes the backend to ignore listing
            # constraints and reverse params for shard range GETs
request.environ['swift.cache'] = FakeMemcache()
with mocked_http_conn(
*codes, body_iter=bodies, headers=exp_headers) as fake_conn:
resp = request.get_response(self.app)
for backend_req in fake_conn.requests:
self.assertEqual(request.headers['X-Trans-Id'],
backend_req['headers']['X-Trans-Id'])
self.assertTrue(backend_req['headers']['User-Agent'].startswith(
'proxy-server'))
self.assertEqual(expected_status, resp.status_int)
if expected_status == 200:
actual_objects = json.loads(resp.body)
self.assertEqual(len(expected_objects), len(actual_objects))
self.assertEqual(expected_objects, actual_objects)
self.assertEqual(len(expected_requests), len(fake_conn.requests))
for i, ((exp_path, exp_headers, exp_params), req) in enumerate(
zip(expected_requests, fake_conn.requests)):
with annotate_failure('Request check at index %d.' % i):
# strip off /sdx/0/ from path
self.assertEqual(exp_path, req['path'][7:])
if six.PY2:
got_params = dict(urllib.parse.parse_qsl(req['qs'], True))
else:
got_params = dict(urllib.parse.parse_qsl(
req['qs'], True, encoding='latin1'))
self.assertEqual(dict(exp_params, format='json'), got_params)
for k, v in exp_headers.items():
self.assertIn(k, req['headers'])
self.assertEqual(v, req['headers'][k], k)
self.assertNotIn('X-Backend-Override-Delete', req['headers'])
if memcache:
self.assertEqual('sharded', req['headers'].get(
'X-Backend-Override-Shard-Name-Filter'))
else:
self.assertNotIn('X-Backend-Override-Shard-Name-Filter',
req['headers'])
return resp
def check_response(self, resp, root_resp_hdrs, expected_objects=None,
exp_sharding_state='sharded'):
info_hdrs = dict(root_resp_hdrs)
if expected_objects is None:
# default is to expect whatever the root container sent
expected_obj_count = root_resp_hdrs['X-Container-Object-Count']
expected_bytes_used = root_resp_hdrs['X-Container-Bytes-Used']
else:
expected_bytes_used = sum([o['bytes'] for o in expected_objects])
expected_obj_count = len(expected_objects)
info_hdrs['X-Container-Bytes-Used'] = expected_bytes_used
info_hdrs['X-Container-Object-Count'] = expected_obj_count
self.assertEqual(expected_bytes_used,
int(resp.headers['X-Container-Bytes-Used']))
self.assertEqual(expected_obj_count,
int(resp.headers['X-Container-Object-Count']))
self.assertEqual(exp_sharding_state,
resp.headers['X-Backend-Sharding-State'])
for k, v in root_resp_hdrs.items():
if k.lower().startswith('x-container-meta'):
self.assertEqual(v, resp.headers[k])
# check that info cache is correct for root container
info = get_container_info(resp.request.environ, self.app)
self.assertEqual(headers_to_container_info(info_hdrs), info)
def test_GET_sharded_container_no_memcache(self):
# Don't worry, ShardRange._encode takes care of unicode/bytes issues
shard_bounds = ('', 'ham', 'pie', u'\N{SNOWMAN}', u'\U0001F334', '')
shard_ranges = [
ShardRange('.shards_a/c_%s' % upper, Timestamp.now(), lower, upper)
for lower, upper in zip(shard_bounds[:-1], shard_bounds[1:])]
sr_dicts = [dict(sr, last_modified=sr.timestamp.isoformat)
for sr in shard_ranges]
sr_objs = [self._make_shard_objects(sr) for sr in shard_ranges]
shard_resp_hdrs = [
{'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': len(sr_objs[i]),
'X-Container-Bytes-Used':
sum([obj['bytes'] for obj in sr_objs[i]]),
'X-Container-Meta-Flavour': 'flavour%d' % i,
'X-Backend-Storage-Policy-Index': 0}
for i, _ in enumerate(shard_ranges)]
all_objects = []
for objects in sr_objs:
all_objects.extend(objects)
size_all_objects = sum([obj['bytes'] for obj in all_objects])
num_all_objects = len(all_objects)
limit = CONTAINER_LISTING_LIMIT
expected_objects = all_objects
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Timestamp': '99',
# pretend root object stats are not yet updated
'X-Container-Object-Count': num_all_objects - 1,
'X-Container-Bytes-Used': size_all_objects - 1,
'X-Container-Meta-Flavour': 'peach',
'X-Backend-Storage-Policy-Index': 0}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
# GET all objects
# include some failed responses
mock_responses = [
# status, body, headers
(404, '', {}),
(200, sr_dicts, root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(200, sr_objs[1], shard_resp_hdrs[1]),
(200, sr_objs[2], shard_resp_hdrs[2]),
(200, sr_objs[3], shard_resp_hdrs[3]),
(200, sr_objs[4], shard_resp_hdrs[4]),
]
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 404
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[0].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='', end_marker='ham\x00', limit=str(limit),
states='listing')), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[2].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='p', end_marker='\xe2\x98\x83\x00', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='\xd1\xb0', end_marker='\xf0\x9f\x8c\xb4\x00',
states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1]
+ sr_objs[2])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[4].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='\xe2\xa8\x83', end_marker='', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1] + sr_objs[2]
+ sr_objs[3])))), # 200
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests)
        # root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs,
expected_objects=expected_objects)
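        # (Illustrative note): the per-shard 'limit' params asserted above
        # follow a running remainder -- each shard GET asks for
        # CONTAINER_LISTING_LIMIT minus the objects already gathered from the
        # preceding shards, e.g. the third shard request carries
        #   limit=str(limit - len(sr_objs[0] + sr_objs[1]))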
# GET all objects - sharding, final shard range points back to root
root_range = ShardRange('a/c', Timestamp.now(), 'pie', '')
mock_responses = [
# status, body, headers
(200, sr_dicts[:2] + [dict(root_range)], root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(200, sr_objs[1], shard_resp_hdrs[1]),
(200, sr_objs[2] + sr_objs[3] + sr_objs[4], root_resp_hdrs)
]
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 200
(shard_ranges[0].name,
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='', end_marker='ham\x00', limit=str(limit),
states='listing')), # 200
(shard_ranges[1].name,
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))), # 200
(root_range.name,
{'X-Backend-Record-Type': 'object',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='p', end_marker='',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))) # 200
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests)
        # root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs,
expected_objects=expected_objects)
# GET all objects in reverse and *blank* limit
mock_responses = [
# status, body, headers
# NB: the backend returns reversed shard range list
(200, list(reversed(sr_dicts)), root_shard_resp_hdrs),
(200, list(reversed(sr_objs[4])), shard_resp_hdrs[4]),
(200, list(reversed(sr_objs[3])), shard_resp_hdrs[3]),
(200, list(reversed(sr_objs[2])), shard_resp_hdrs[2]),
(200, list(reversed(sr_objs[1])), shard_resp_hdrs[1]),
(200, list(reversed(sr_objs[0])), shard_resp_hdrs[0]),
]
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing', reverse='true', limit='')),
(wsgi_quote(str_to_wsgi(shard_ranges[4].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='', end_marker='\xf0\x9f\x8c\xb4', states='listing',
reverse='true', limit=str(limit))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='\xf0\x9f\x8c\xb5', end_marker='\xe2\x98\x83',
states='listing', reverse='true',
limit=str(limit - len(sr_objs[4])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[2].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='\xe2\x98\x84', end_marker='pie', states='listing',
reverse='true',
limit=str(limit - len(sr_objs[4] + sr_objs[3])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='q', end_marker='ham', states='listing',
reverse='true',
limit=str(limit - len(sr_objs[4] + sr_objs[3]
+ sr_objs[2])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[0].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='i', end_marker='', states='listing', reverse='true',
limit=str(limit - len(sr_objs[4] + sr_objs[3] + sr_objs[2]
+ sr_objs[1])))), # 200
]
resp = self._check_GET_shard_listing(
mock_responses, list(reversed(expected_objects)),
expected_requests, query_string='?reverse=true&limit=',
reverse=True)
        # root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs,
expected_objects=expected_objects)
# GET with limit param
limit = len(sr_objs[0]) + len(sr_objs[1]) + 1
expected_objects = all_objects[:limit]
mock_responses = [
(404, '', {}),
(200, sr_dicts, root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(200, sr_objs[1], shard_resp_hdrs[1]),
(200, sr_objs[2][:1], shard_resp_hdrs[2])
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(limit=str(limit), states='listing')), # 404
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(limit=str(limit), states='listing')), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[0].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='', end_marker='ham\x00', states='listing',
limit=str(limit))),
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))),
(wsgi_quote(str_to_wsgi(shard_ranges[2].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='p', end_marker='\xe2\x98\x83\x00', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))),
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
query_string='?limit=%s' % limit)
self.check_response(resp, root_resp_hdrs)
# GET with marker
marker = bytes_to_wsgi(sr_objs[3][2]['name'].encode('utf8'))
first_included = (len(sr_objs[0]) + len(sr_objs[1])
+ len(sr_objs[2]) + 2)
limit = CONTAINER_LISTING_LIMIT
expected_objects = all_objects[first_included:]
mock_responses = [
(404, '', {}),
(200, sr_dicts[3:], root_shard_resp_hdrs),
(404, '', {}),
(200, sr_objs[3][2:], shard_resp_hdrs[3]),
(200, sr_objs[4], shard_resp_hdrs[4]),
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(marker=marker, states='listing')), # 404
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(marker=marker, states='listing')), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker=marker, end_marker='\xf0\x9f\x8c\xb4\x00',
states='listing', limit=str(limit))),
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker=marker, end_marker='\xf0\x9f\x8c\xb4\x00',
states='listing', limit=str(limit))),
(wsgi_quote(str_to_wsgi(shard_ranges[4].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='\xe2\xa8\x83', end_marker='', states='listing',
limit=str(limit - len(sr_objs[3][2:])))),
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
query_string='?marker=%s' % marker)
self.check_response(resp, root_resp_hdrs)
# GET with end marker
end_marker = bytes_to_wsgi(sr_objs[3][6]['name'].encode('utf8'))
first_excluded = (len(sr_objs[0]) + len(sr_objs[1])
+ len(sr_objs[2]) + 6)
expected_objects = all_objects[:first_excluded]
mock_responses = [
(404, '', {}),
(200, sr_dicts[:4], root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(404, '', {}),
(200, sr_objs[1], shard_resp_hdrs[1]),
(200, sr_objs[2], shard_resp_hdrs[2]),
(404, '', {}),
(200, sr_objs[3][:6], shard_resp_hdrs[3]),
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(end_marker=end_marker, states='listing')), # 404
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(end_marker=end_marker, states='listing')), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[0].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='', end_marker='ham\x00', states='listing',
limit=str(limit))),
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 404
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))),
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))),
(wsgi_quote(str_to_wsgi(shard_ranges[2].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='p', end_marker='\xe2\x98\x83\x00', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))),
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 404
dict(marker='\xd1\xb0', end_marker=end_marker, states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1]
+ sr_objs[2])))),
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='\xd1\xb0', end_marker=end_marker, states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1]
+ sr_objs[2])))),
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
query_string='?end_marker=%s' % end_marker)
self.check_response(resp, root_resp_hdrs)
# GET with prefix
prefix = 'hat'
# they're all 1-character names; the important thing
# is which shards we query
expected_objects = []
mock_responses = [
(404, '', {}),
(200, sr_dicts, root_shard_resp_hdrs),
(200, [], shard_resp_hdrs[1]),
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(prefix=prefix, states='listing')), # 404
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(prefix=prefix, states='listing')), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 404
dict(prefix=prefix, marker='', end_marker='pie\x00',
states='listing', limit=str(limit))),
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
query_string='?prefix=%s' % prefix)
self.check_response(resp, root_resp_hdrs)
# marker and end_marker and limit
limit = 2
expected_objects = all_objects[first_included:first_excluded]
mock_responses = [
(200, sr_dicts[3:4], root_shard_resp_hdrs),
(200, sr_objs[3][2:6], shard_resp_hdrs[1])
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing', limit=str(limit),
marker=marker, end_marker=end_marker)), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker=marker, end_marker=end_marker, states='listing',
limit=str(limit))),
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
query_string='?marker=%s&end_marker=%s&limit=%s'
% (marker, end_marker, limit))
self.check_response(resp, root_resp_hdrs)
# reverse with marker, end_marker, and limit
expected_objects.reverse()
mock_responses = [
(200, sr_dicts[3:4], root_shard_resp_hdrs),
(200, list(reversed(sr_objs[3][2:6])), shard_resp_hdrs[1])
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(marker=end_marker, reverse='true', end_marker=marker,
limit=str(limit), states='listing',)), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker=end_marker, end_marker=marker, states='listing',
limit=str(limit), reverse='true')),
]
self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
query_string='?marker=%s&end_marker=%s&limit=%s&reverse=true'
% (end_marker, marker, limit), reverse=True)
self.check_response(resp, root_resp_hdrs)
def test_GET_sharded_container_with_memcache(self):
# verify alternative code path in ContainerController when memcache is
# available...
shard_bounds = ('', 'ham', 'pie', u'\N{SNOWMAN}', u'\U0001F334', '')
shard_ranges = [
ShardRange('.shards_a/c_%s' % upper, Timestamp.now(), lower, upper)
for lower, upper in zip(shard_bounds[:-1], shard_bounds[1:])]
sr_dicts = [dict(sr, last_modified=sr.timestamp.isoformat)
for sr in shard_ranges]
sr_objs = [self._make_shard_objects(sr) for sr in shard_ranges]
shard_resp_hdrs = [
{'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': len(sr_objs[i]),
'X-Container-Bytes-Used':
sum([obj['bytes'] for obj in sr_objs[i]]),
'X-Container-Meta-Flavour': 'flavour%d' % i,
'X-Backend-Storage-Policy-Index': 0}
for i, _ in enumerate(shard_ranges)]
all_objects = []
for objects in sr_objs:
all_objects.extend(objects)
size_all_objects = sum([obj['bytes'] for obj in all_objects])
num_all_objects = len(all_objects)
limit = CONTAINER_LISTING_LIMIT
expected_objects = all_objects
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Timestamp': '99',
# pretend root object stats are not yet updated
'X-Container-Object-Count': num_all_objects - 1,
'X-Container-Bytes-Used': size_all_objects - 1,
'X-Container-Meta-Flavour': 'peach',
'X-Backend-Storage-Policy-Index': 0,
'X-Backend-Override-Shard-Name-Filter': 'true'}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
# GET all objects
# include some failed responses
mock_responses = [
# status, body, headers
(404, '', {}),
(200, sr_dicts, root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(200, sr_objs[1], shard_resp_hdrs[1]),
(200, sr_objs[2], shard_resp_hdrs[2]),
(200, sr_objs[3], shard_resp_hdrs[3]),
(200, sr_objs[4], shard_resp_hdrs[4]),
]
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto',
'X-Backend-Override-Shard-Name-Filter': 'sharded'},
dict(states='listing')), # 404
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[0].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='', end_marker='ham\x00', limit=str(limit),
states='listing')), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[2].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='p', end_marker='\xe2\x98\x83\x00', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='\xd1\xb0', end_marker='\xf0\x9f\x8c\xb4\x00',
states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1]
+ sr_objs[2])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[4].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='\xe2\xa8\x83', end_marker='', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1] + sr_objs[2]
+ sr_objs[3])))), # 200
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests, memcache=True)
        # root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs,
expected_objects=expected_objects)
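        # (Illustrative note): with memcache available the proxy advertises
        # 'X-Backend-Override-Shard-Name-Filter: sharded' on the root
        # container GET (checked in _check_GET_shard_listing) so that the
        # backend may return the complete set of shard namespaces suitable
        # for caching rather than one filtered by the client's params.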
# GET all objects - sharding, final shard range points back to root
root_range = ShardRange('a/c', Timestamp.now(), 'pie', '')
mock_responses = [
# status, body, headers
(200, sr_dicts[:2] + [dict(root_range)], root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(200, sr_objs[1], shard_resp_hdrs[1]),
(200, sr_objs[2] + sr_objs[3] + sr_objs[4], root_resp_hdrs)
]
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto',
'X-Backend-Override-Shard-Name-Filter': 'sharded'},
dict(states='listing')), # 200
(shard_ranges[0].name,
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='', end_marker='ham\x00', limit=str(limit),
states='listing')), # 200
(shard_ranges[1].name,
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))), # 200
(root_range.name,
{'X-Backend-Record-Type': 'object',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='p', end_marker='',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))) # 200
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests, memcache=True)
        # root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs,
expected_objects=expected_objects)
# GET all objects in reverse and *blank* limit
mock_responses = [
# status, body, headers
(200, list(sr_dicts), root_shard_resp_hdrs),
(200, list(reversed(sr_objs[4])), shard_resp_hdrs[4]),
(200, list(reversed(sr_objs[3])), shard_resp_hdrs[3]),
(200, list(reversed(sr_objs[2])), shard_resp_hdrs[2]),
(200, list(reversed(sr_objs[1])), shard_resp_hdrs[1]),
(200, list(reversed(sr_objs[0])), shard_resp_hdrs[0]),
]
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto',
'X-Backend-Override-Shard-Name-Filter': 'sharded'},
dict(states='listing', reverse='true', limit='')),
(wsgi_quote(str_to_wsgi(shard_ranges[4].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='', end_marker='\xf0\x9f\x8c\xb4', states='listing',
reverse='true', limit=str(limit))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='\xf0\x9f\x8c\xb5', end_marker='\xe2\x98\x83',
states='listing', reverse='true',
limit=str(limit - len(sr_objs[4])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[2].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='\xe2\x98\x84', end_marker='pie', states='listing',
reverse='true',
limit=str(limit - len(sr_objs[4] + sr_objs[3])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='q', end_marker='ham', states='listing',
reverse='true',
limit=str(limit - len(sr_objs[4] + sr_objs[3]
+ sr_objs[2])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[0].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='i', end_marker='', states='listing', reverse='true',
limit=str(limit - len(sr_objs[4] + sr_objs[3] + sr_objs[2]
+ sr_objs[1])))), # 200
]
resp = self._check_GET_shard_listing(
mock_responses, list(reversed(expected_objects)),
expected_requests, query_string='?reverse=true&limit=',
reverse=True, memcache=True)
        # root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs,
expected_objects=expected_objects)
# GET with limit param
limit = len(sr_objs[0]) + len(sr_objs[1]) + 1
expected_objects = all_objects[:limit]
mock_responses = [
(404, '', {}),
(200, sr_dicts, root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(200, sr_objs[1], shard_resp_hdrs[1]),
(200, sr_objs[2][:1], shard_resp_hdrs[2])
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto',
'X-Backend-Override-Shard-Name-Filter': 'sharded'},
dict(limit=str(limit), states='listing')), # 404
('a/c', {'X-Backend-Record-Type': 'auto',
'X-Backend-Override-Shard-Name-Filter': 'sharded'},
dict(limit=str(limit), states='listing')), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[0].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='', end_marker='ham\x00', states='listing',
limit=str(limit))),
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))),
(wsgi_quote(str_to_wsgi(shard_ranges[2].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='p', end_marker='\xe2\x98\x83\x00', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))),
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
query_string='?limit=%s' % limit, memcache=True)
self.check_response(resp, root_resp_hdrs)
# GET with marker
marker = bytes_to_wsgi(sr_objs[3][2]['name'].encode('utf8'))
first_included = (len(sr_objs[0]) + len(sr_objs[1])
+ len(sr_objs[2]) + 2)
limit = CONTAINER_LISTING_LIMIT
expected_objects = all_objects[first_included:]
mock_responses = [
(404, '', {}),
(200, sr_dicts[3:], root_shard_resp_hdrs),
(404, '', {}),
(200, sr_objs[3][2:], shard_resp_hdrs[3]),
(200, sr_objs[4], shard_resp_hdrs[4]),
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto',
'X-Backend-Override-Shard-Name-Filter': 'sharded'},
dict(marker=marker, states='listing')), # 404
('a/c', {'X-Backend-Record-Type': 'auto',
'X-Backend-Override-Shard-Name-Filter': 'sharded'},
dict(marker=marker, states='listing')), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker=marker, end_marker='\xf0\x9f\x8c\xb4\x00',
states='listing', limit=str(limit))),
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker=marker, end_marker='\xf0\x9f\x8c\xb4\x00',
states='listing', limit=str(limit))),
(wsgi_quote(str_to_wsgi(shard_ranges[4].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='\xe2\xa8\x83', end_marker='', states='listing',
limit=str(limit - len(sr_objs[3][2:])))),
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
query_string='?marker=%s' % marker, memcache=True)
self.check_response(resp, root_resp_hdrs)
# GET with end marker
end_marker = bytes_to_wsgi(sr_objs[3][6]['name'].encode('utf8'))
first_excluded = (len(sr_objs[0]) + len(sr_objs[1])
+ len(sr_objs[2]) + 6)
expected_objects = all_objects[:first_excluded]
mock_responses = [
(404, '', {}),
(200, sr_dicts[:4], root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(404, '', {}),
(200, sr_objs[1], shard_resp_hdrs[1]),
(200, sr_objs[2], shard_resp_hdrs[2]),
(404, '', {}),
(200, sr_objs[3][:6], shard_resp_hdrs[3]),
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto',
'X-Backend-Override-Shard-Name-Filter': 'sharded'},
dict(end_marker=end_marker, states='listing')), # 404
('a/c', {'X-Backend-Record-Type': 'auto',
'X-Backend-Override-Shard-Name-Filter': 'sharded'},
dict(end_marker=end_marker, states='listing')), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[0].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='', end_marker='ham\x00', states='listing',
limit=str(limit))),
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 404
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))),
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))),
(wsgi_quote(str_to_wsgi(shard_ranges[2].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='p', end_marker='\xe2\x98\x83\x00', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))),
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 404
dict(marker='\xd1\xb0', end_marker=end_marker, states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1]
+ sr_objs[2])))),
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker='\xd1\xb0', end_marker=end_marker, states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1]
+ sr_objs[2])))),
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
query_string='?end_marker=%s' % end_marker, memcache=True)
self.check_response(resp, root_resp_hdrs)
# GET with prefix
prefix = 'hat'
# they're all 1-character names; the important thing
# is which shards we query
expected_objects = []
mock_responses = [
(404, '', {}),
(200, sr_dicts, root_shard_resp_hdrs),
(200, [], shard_resp_hdrs[1]),
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto',
'X-Backend-Override-Shard-Name-Filter': 'sharded'},
dict(prefix=prefix, states='listing')), # 404
('a/c', {'X-Backend-Record-Type': 'auto',
'X-Backend-Override-Shard-Name-Filter': 'sharded'},
dict(prefix=prefix, states='listing')), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 404
dict(prefix=prefix, marker='', end_marker='pie\x00',
states='listing', limit=str(limit))),
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
query_string='?prefix=%s' % prefix, memcache=True)
self.check_response(resp, root_resp_hdrs)
# marker and end_marker and limit
limit = 2
expected_objects = all_objects[first_included:first_excluded]
mock_responses = [
(200, sr_dicts[3:4], root_shard_resp_hdrs),
(200, sr_objs[3][2:6], shard_resp_hdrs[1])
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto',
'X-Backend-Override-Shard-Name-Filter': 'sharded'},
dict(states='listing', limit=str(limit),
marker=marker, end_marker=end_marker)), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker=marker, end_marker=end_marker, states='listing',
limit=str(limit))),
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
query_string='?marker=%s&end_marker=%s&limit=%s'
% (marker, end_marker, limit), memcache=True)
self.check_response(resp, root_resp_hdrs)
# reverse with marker, end_marker, and limit
expected_objects.reverse()
mock_responses = [
(200, sr_dicts[3:4], root_shard_resp_hdrs),
(200, list(reversed(sr_objs[3][2:6])), shard_resp_hdrs[1])
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto',
'X-Backend-Override-Shard-Name-Filter': 'sharded'},
dict(marker=end_marker, reverse='true', end_marker=marker,
limit=str(limit), states='listing',)), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'}, # 200
dict(marker=end_marker, end_marker=marker, states='listing',
limit=str(limit), reverse='true')),
]
self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
query_string='?marker=%s&end_marker=%s&limit=%s&reverse=true'
% (end_marker, marker, limit), reverse=True, memcache=True)
self.check_response(resp, root_resp_hdrs)
def _do_test_GET_sharded_container_with_deleted_shards(self, shard_specs):
# verify that if a shard fails to return its listing component then the
# client response is 503
shard_bounds = (('a', 'b'), ('b', 'c'), ('c', ''))
shard_ranges = [
ShardRange('.shards_a/c_%s' % upper, Timestamp.now(), lower, upper)
for lower, upper in shard_bounds]
sr_dicts = [dict(sr) for sr in shard_ranges]
sr_objs = [self._make_shard_objects(sr) for sr in shard_ranges]
shard_resp_hdrs = [
{'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': len(sr_objs[i]),
'X-Container-Bytes-Used':
sum([obj['bytes'] for obj in sr_objs[i]]),
'X-Container-Meta-Flavour': 'flavour%d' % i,
'X-Backend-Storage-Policy-Index': 0}
for i, _ in enumerate(shard_ranges)]
all_objects = []
for objects in sr_objs:
all_objects.extend(objects)
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Timestamp': '99',
# pretend root object stats are not yet updated
'X-Container-Object-Count': 6,
'X-Container-Bytes-Used': 12,
'X-Backend-Storage-Policy-Index': 0}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
mock_responses = [
# status, body, headers
(200, sr_dicts, root_shard_resp_hdrs),
]
for i, spec in enumerate(shard_specs):
if spec == 200:
mock_responses.append((200, sr_objs[i], shard_resp_hdrs[i]))
else:
mock_responses.extend(
[(spec, '', {})] * 2 * self.CONTAINER_REPLICAS)
codes = (resp[0] for resp in mock_responses)
bodies = iter([json.dumps(resp[1]).encode('ascii')
for resp in mock_responses])
exp_headers = [resp[2] for resp in mock_responses]
request = Request.blank('/v1/a/c')
with mocked_http_conn(
*codes, body_iter=bodies, headers=exp_headers) as fake_conn:
resp = request.get_response(self.app)
self.assertEqual(len(mock_responses), len(fake_conn.requests))
return request, resp
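    # (Illustrative note, an assumption about the default ring configuration
    # rather than something asserted here): each failing shard contributes
    # 2 * CONTAINER_REPLICAS mocked responses above because the proxy, with
    # the default request_node_count, retries the shard GET on as many
    # handoff nodes as there are primaries before giving up.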
def test_GET_sharded_container_with_deleted_shard(self):
req, resp = self._do_test_GET_sharded_container_with_deleted_shards(
[404])
warnings = self.logger.get_lines_for_level('warning')
self.assertEqual(['Failed to get container listing from '
'%s: 404' % req.path_qs],
warnings)
self.assertEqual(resp.status_int, 503)
errors = self.logger.get_lines_for_level('error')
self.assertEqual(
['Aborting listing from shards due to bad response: %s'
             % ([404],)], errors)
def test_GET_sharded_container_with_mix_ok_and_deleted_shard(self):
req, resp = self._do_test_GET_sharded_container_with_deleted_shards(
[200, 200, 404])
warnings = self.logger.get_lines_for_level('warning')
self.assertEqual(['Failed to get container listing from '
'%s: 404' % req.path_qs], warnings)
self.assertEqual(resp.status_int, 503)
errors = self.logger.get_lines_for_level('error')
self.assertEqual(
['Aborting listing from shards due to bad response: %s'
% ([200, 200, 404],)], errors)
def test_GET_sharded_container_mix_ok_and_unavailable_shards(self):
req, resp = self._do_test_GET_sharded_container_with_deleted_shards(
[200, 200, 503])
warnings = self.logger.get_lines_for_level('warning')
self.assertEqual(['Failed to get container listing from '
'%s: 503' % req.path_qs], warnings[-1:])
self.assertEqual(resp.status_int, 503)
errors = self.logger.get_lines_for_level('error')
self.assertEqual(
['Aborting listing from shards due to bad response: %s'
% ([200, 200, 503],)], errors[-1:])
def test_GET_sharded_container_with_delimiter(self):
shard_bounds = (('', 'ha/ppy'), ('ha/ppy', 'ha/ptic'),
('ha/ptic', 'ham'), ('ham', 'pie'), ('pie', ''))
shard_ranges = [
ShardRange('.shards_a/c_%s' % upper.replace('/', ''),
Timestamp.now(), lower, upper)
for lower, upper in shard_bounds]
sr_dicts = [dict(sr) for sr in shard_ranges]
shard_resp_hdrs = {'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': 2,
'X-Container-Bytes-Used': 4,
'X-Backend-Storage-Policy-Index': 0}
limit = CONTAINER_LISTING_LIMIT
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Timestamp': '99',
# pretend root object stats are not yet updated
'X-Container-Object-Count': 6,
'X-Container-Bytes-Used': 12,
'X-Backend-Storage-Policy-Index': 0}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
sr_0_obj = {'name': 'apple',
'bytes': 1,
'hash': 'hash',
'content_type': 'text/plain',
'deleted': 0,
'last_modified': next(self.ts_iter).isoformat}
sr_5_obj = {'name': 'pumpkin',
'bytes': 1,
'hash': 'hash',
'content_type': 'text/plain',
'deleted': 0,
'last_modified': next(self.ts_iter).isoformat}
subdir = {'subdir': 'ha/'}
mock_responses = [
# status, body, headers
(200, sr_dicts, root_shard_resp_hdrs),
(200, [sr_0_obj, subdir], shard_resp_hdrs),
(200, [], shard_resp_hdrs),
(200, [], shard_resp_hdrs),
(200, [sr_5_obj], shard_resp_hdrs)
]
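        # NB no request is expected for shard_ranges[1] because its entire
        # namespace falls under the 'ha/' subdir returned by the first shard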
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing', delimiter='/')), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='ha/ppy\x00', limit=str(limit),
states='listing', delimiter='/')), # 200
(shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='ha/', end_marker='ham\x00', states='listing',
limit=str(limit - 2), delimiter='/')), # 200
(shard_ranges[3].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='ha/', end_marker='pie\x00', states='listing',
limit=str(limit - 2), delimiter='/')), # 200
(shard_ranges[4].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='ha/', end_marker='', states='listing',
limit=str(limit - 2), delimiter='/')), # 200
]
expected_objects = [sr_0_obj, subdir, sr_5_obj]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
query_string='?delimiter=/')
self.check_response(resp, root_resp_hdrs)
def test_GET_sharded_container_with_delimiter_and_reverse(self):
shard_points = ('', 'ha.d', 'ha/ppy', 'ha/ptic', 'ham', 'pie', '')
shard_bounds = tuple(zip(shard_points, shard_points[1:]))
shard_ranges = [
ShardRange('.shards_a/c_%s' % upper.replace('/', ''),
Timestamp.now(), lower, upper)
for lower, upper in shard_bounds]
sr_dicts = [dict(sr) for sr in shard_ranges]
shard_resp_hdrs = {'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': 2,
'X-Container-Bytes-Used': 4,
'X-Backend-Storage-Policy-Index': 0}
limit = CONTAINER_LISTING_LIMIT
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Timestamp': '99',
# pretend root object stats are not yet updated
'X-Container-Object-Count': 6,
'X-Container-Bytes-Used': 12,
'X-Backend-Storage-Policy-Index': 0}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
sr_0_obj = {'name': 'apple',
'bytes': 1,
'hash': 'hash',
'content_type': 'text/plain',
'deleted': 0,
'last_modified': next(self.ts_iter).isoformat}
sr_1_obj = {'name': 'ha.ggle',
'bytes': 1,
'hash': 'hash',
'content_type': 'text/plain',
'deleted': 0,
'last_modified': next(self.ts_iter).isoformat}
sr_5_obj = {'name': 'pumpkin',
'bytes': 1,
'hash': 'hash',
'content_type': 'text/plain',
'deleted': 0,
'last_modified': next(self.ts_iter).isoformat}
subdir = {'subdir': 'ha/'}
mock_responses = [
# status, body, headers
(200, list(reversed(sr_dicts)), root_shard_resp_hdrs),
(200, [sr_5_obj], shard_resp_hdrs),
(200, [], shard_resp_hdrs),
(200, [subdir], shard_resp_hdrs),
(200, [sr_1_obj], shard_resp_hdrs),
(200, [sr_0_obj], shard_resp_hdrs),
]
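        # NB no request is expected for shard_ranges[2] because its entire
        # namespace falls under the 'ha/' subdir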
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing', delimiter='/', reverse='on')), # 200
(shard_ranges[5].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='pie', states='listing',
limit=str(limit), delimiter='/', reverse='on')), # 200
(shard_ranges[4].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='pumpkin', end_marker='ham', states='listing',
limit=str(limit - 1), delimiter='/', reverse='on')), # 200
(shard_ranges[3].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='pumpkin', end_marker='ha/ptic', states='listing',
limit=str(limit - 1), delimiter='/', reverse='on')), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='ha/', end_marker='ha.d', limit=str(limit - 2),
states='listing', delimiter='/', reverse='on')), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='ha.ggle', end_marker='', limit=str(limit - 3),
states='listing', delimiter='/', reverse='on')), # 200
]
expected_objects = [sr_5_obj, subdir, sr_1_obj, sr_0_obj]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
query_string='?delimiter=/&reverse=on', reverse=True)
self.check_response(resp, root_resp_hdrs)
def test_GET_sharded_container_shard_redirects_to_root(self):
        # check that if the root redirects the listing to a shard, but the
        # shard returns the root shard range (e.g. it was the final shard to
        # shrink into the root), then objects are requested from the root
        # rather than entering a redirect loop.
# single shard spanning entire namespace
shard_sr = ShardRange('.shards_a/c_xyz', Timestamp.now(), '', '')
all_objects = self._make_shard_objects(shard_sr)
size_all_objects = sum([obj['bytes'] for obj in all_objects])
num_all_objects = len(all_objects)
limit = CONTAINER_LISTING_LIMIT
# when shrinking the final shard will return the root shard range into
# which it is shrinking
shard_resp_hdrs = {
'X-Backend-Sharding-State': 'sharding',
'X-Container-Object-Count': 0,
'X-Container-Bytes-Used': 0,
'X-Backend-Storage-Policy-Index': 0,
'X-Backend-Record-Type': 'shard'
}
# root still thinks it has a shard
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Timestamp': '99',
'X-Container-Object-Count': num_all_objects,
'X-Container-Bytes-Used': size_all_objects,
'X-Backend-Storage-Policy-Index': 0}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
root_sr = ShardRange('a/c', Timestamp.now(), '', '')
mock_responses = [
# status, body, headers
(200, [dict(shard_sr)], root_shard_resp_hdrs), # from root
(200, [dict(root_sr)], shard_resp_hdrs), # from shard
(200, all_objects, root_resp_hdrs), # from root
]
expected_requests = [
# path, headers, params
# first request to root should specify auto record type
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')),
# request to shard should specify auto record type
(wsgi_quote(str_to_wsgi(shard_sr.name)),
{'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='', limit=str(limit),
states='listing')), # 200
# second request to root should specify object record type
('a/c', {'X-Backend-Record-Type': 'object'},
dict(marker='', end_marker='', limit=str(limit))), # 200
]
expected_objects = all_objects
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests)
self.check_response(resp, root_resp_hdrs,
expected_objects=expected_objects)
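        # the listing history should record the redirect chain: the root
        # container followed by the shard that pointed back to the root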
self.assertEqual(
[('a', 'c'), ('.shards_a', 'c_xyz')],
resp.request.environ.get('swift.shard_listing_history'))
lines = [line for line in self.app.logger.get_lines_for_level('debug')
if line.startswith('Found 1024 objects in shard')]
self.assertEqual(2, len(lines), lines)
self.assertIn("(state=sharded), total = 1024", lines[0]) # shard->root
self.assertIn("(state=sharding), total = 1024", lines[1]) # shard
def test_GET_sharded_container_shard_redirects_between_shards(self):
# check that if one shard redirects listing to another shard that
# somehow redirects listing back to the first shard, then we will break
# out of the loop (this isn't an expected scenario, but could perhaps
# happen if multiple conflicting shard-shrinking decisions are made)
shard_bounds = ('', 'a', 'b', '')
shard_ranges = [
ShardRange('.shards_a/c_%s' % upper, Timestamp.now(), lower, upper)
for lower, upper in zip(shard_bounds[:-1], shard_bounds[1:])]
self.assertEqual([
'.shards_a/c_a',
'.shards_a/c_b',
'.shards_a/c_',
], [sr.name for sr in shard_ranges])
sr_dicts = [dict(sr) for sr in shard_ranges]
sr_objs = [self._make_shard_objects(sr) for sr in shard_ranges]
all_objects = []
for objects in sr_objs:
all_objects.extend(objects)
size_all_objects = sum([obj['bytes'] for obj in all_objects])
num_all_objects = len(all_objects)
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Timestamp': '99',
'X-Container-Object-Count': num_all_objects,
'X-Container-Bytes-Used': size_all_objects,
'X-Backend-Storage-Policy-Index': 0,
'X-Backend-Record-Storage-Policy-Index': 0,
'X-Backend-Record-Type': 'shard',
}
shard_resp_hdrs = {'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': 2,
'X-Container-Bytes-Used': 4,
'X-Backend-Storage-Policy-Index': 0,
'X-Backend-Record-Storage-Policy-Index': 0,
}
shrinking_resp_hdrs = {
'X-Backend-Sharding-State': 'sharded',
'X-Backend-Record-Type': 'shard',
'X-Backend-Storage-Policy-Index': 0
}
limit = CONTAINER_LISTING_LIMIT
mock_responses = [
# status, body, headers
(200, sr_dicts, root_resp_hdrs), # from root
(200, sr_objs[0], shard_resp_hdrs), # objects from 1st shard
(200, [sr_dicts[2]], shrinking_resp_hdrs), # 2nd points to 3rd
(200, [sr_dicts[1]], shrinking_resp_hdrs), # 3rd points to 2nd
(200, sr_objs[1], shard_resp_hdrs), # objects from 2nd
(200, sr_objs[2], shard_resp_hdrs), # objects from 3rd
]
expected_requests = [
# each list item is tuple (path, headers, params)
# request to root
# context GET(a/c)
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')),
# request to 1st shard as per shard list from root;
# context GET(a/c);
# end_marker dictated by 1st shard range upper bound
('.shards_a/c_a', {'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='a\x00', states='listing',
limit=str(limit))), # 200
# request to 2nd shard as per shard list from root;
# context GET(a/c);
# end_marker dictated by 2nd shard range upper bound
('.shards_a/c_b', {'X-Backend-Record-Type': 'auto'},
dict(marker='a', end_marker='b\x00', states='listing',
limit=str(limit - len(sr_objs[0])))),
# request to 3rd shard as per shard list from *2nd shard*;
# new context GET(a/c)->GET(.shards_a/c_b);
# end_marker still dictated by 2nd shard range upper bound
('.shards_a/c_', {'X-Backend-Record-Type': 'auto'},
dict(marker='a', end_marker='b\x00', states='listing',
limit=str(
limit - len(sr_objs[0])))),
# request to 2nd shard as per shard list from *3rd shard*; this one
# should specify record type object;
# new context GET(a/c)->GET(.shards_a/c_b)->GET(.shards_a/c_);
# end_marker still dictated by 2nd shard range upper bound
('.shards_a/c_b', {'X-Backend-Record-Type': 'object'},
dict(marker='a', end_marker='b\x00',
limit=str(
limit - len(sr_objs[0])))),
# request to 3rd shard *as per shard list from root*; this one
# should specify record type object;
# context GET(a/c);
# end_marker dictated by 3rd shard range upper bound
('.shards_a/c_', {'X-Backend-Record-Type': 'object'},
dict(marker='b', end_marker='',
limit=str(
limit - len(sr_objs[0]) - len(sr_objs[1])))), # 200
]
resp = self._check_GET_shard_listing(
mock_responses, all_objects, expected_requests)
self.check_response(resp, root_resp_hdrs,
expected_objects=all_objects)
self.assertEqual(
[('a', 'c'), ('.shards_a', 'c_b'), ('.shards_a', 'c_')],
resp.request.environ.get('swift.shard_listing_history'))
def test_GET_sharded_container_overlapping_shards(self):
# verify ordered listing even if unexpected overlapping shard ranges
shard_bounds = (('', 'ham', ShardRange.CLEAVED),
('', 'pie', ShardRange.ACTIVE),
('lemon', '', ShardRange.ACTIVE))
shard_ranges = [
ShardRange('.shards_a/c_' + upper, Timestamp.now(), lower, upper,
state=state)
for lower, upper, state in shard_bounds]
sr_dicts = [dict(sr) for sr in shard_ranges]
sr_objs = [self._make_shard_objects(sr) for sr in shard_ranges]
shard_resp_hdrs = [
{'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': len(sr_objs[i]),
'X-Container-Bytes-Used':
sum([obj['bytes'] for obj in sr_objs[i]]),
'X-Container-Meta-Flavour': 'flavour%d' % i,
'X-Backend-Storage-Policy-Index': 0}
for i in range(3)]
all_objects = []
for objects in sr_objs:
all_objects.extend(objects)
size_all_objects = sum([obj['bytes'] for obj in all_objects])
num_all_objects = len(all_objects)
limit = CONTAINER_LISTING_LIMIT
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Timestamp': '99',
# pretend root object stats are not yet updated
'X-Container-Object-Count': num_all_objects - 1,
'X-Container-Bytes-Used': size_all_objects - 1,
'X-Container-Meta-Flavour': 'peach',
'X-Backend-Storage-Policy-Index': 0}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
# forwards listing
# expect subset of second shard range
objs_1 = [o for o in sr_objs[1] if o['name'] > sr_objs[0][-1]['name']]
# expect subset of third shard range
objs_2 = [o for o in sr_objs[2] if o['name'] > sr_objs[1][-1]['name']]
mock_responses = [
# status, body, headers
(200, sr_dicts, root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(200, objs_1, shard_resp_hdrs[1]),
(200, objs_2, shard_resp_hdrs[2])
]
# NB marker always advances to last object name
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='ham\x00', states='listing',
limit=str(limit))), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))), # 200
(shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='p', end_marker='', states='listing',
limit=str(limit - len(sr_objs[0] + objs_1)))) # 200
]
expected_objects = sr_objs[0] + objs_1 + objs_2
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests)
        # root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs,
expected_objects=expected_objects)
# reverse listing
# expect subset of third shard range
objs_0 = [o for o in sr_objs[0] if o['name'] < sr_objs[1][0]['name']]
# expect subset of second shard range
objs_1 = [o for o in sr_objs[1] if o['name'] < sr_objs[2][0]['name']]
mock_responses = [
# status, body, headers
(200, list(reversed(sr_dicts)), root_shard_resp_hdrs),
(200, list(reversed(sr_objs[2])), shard_resp_hdrs[2]),
(200, list(reversed(objs_1)), shard_resp_hdrs[1]),
(200, list(reversed(objs_0)), shard_resp_hdrs[0]),
]
# NB marker always advances to last object name
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing', reverse='true')), # 200
(shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='lemon', states='listing',
limit=str(limit),
reverse='true')), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='m', end_marker='', reverse='true', states='listing',
limit=str(limit - len(sr_objs[2])))), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='A', end_marker='', reverse='true', states='listing',
limit=str(limit - len(sr_objs[2] + objs_1)))) # 200
]
expected_objects = list(reversed(objs_0 + objs_1 + sr_objs[2]))
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
query_string='?reverse=true', reverse=True)
        # root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs,
expected_objects=expected_objects)
def test_GET_sharded_container_gap_in_shards_no_memcache(self):
# verify ordered listing even if unexpected gap between shard ranges
shard_bounds = (('', 'ham'), ('onion', 'pie'), ('rhubarb', ''))
shard_ranges = [
ShardRange('.shards_a/c_' + upper, Timestamp.now(), lower, upper)
for lower, upper in shard_bounds]
sr_dicts = [dict(sr) for sr in shard_ranges]
sr_objs = [self._make_shard_objects(sr) for sr in shard_ranges]
shard_resp_hdrs = [
{'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': len(sr_objs[i]),
'X-Container-Bytes-Used':
sum([obj['bytes'] for obj in sr_objs[i]]),
'X-Container-Meta-Flavour': 'flavour%d' % i,
'X-Backend-Storage-Policy-Index': 0}
for i in range(3)]
all_objects = []
for objects in sr_objs:
all_objects.extend(objects)
size_all_objects = sum([obj['bytes'] for obj in all_objects])
num_all_objects = len(all_objects)
limit = CONTAINER_LISTING_LIMIT
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Timestamp': '99',
'X-Container-Object-Count': num_all_objects,
'X-Container-Bytes-Used': size_all_objects,
'X-Container-Meta-Flavour': 'peach',
'X-Backend-Storage-Policy-Index': 0}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
mock_responses = [
# status, body, headers
(200, sr_dicts, root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(200, sr_objs[1], shard_resp_hdrs[1]),
(200, sr_objs[2], shard_resp_hdrs[2])
]
# NB marker always advances to last object name
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='ham\x00', states='listing',
limit=str(limit))), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))), # 200
(shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='p', end_marker='', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))) # 200
]
resp = self._check_GET_shard_listing(
mock_responses, all_objects, expected_requests, memcache=False)
# root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs)
self.assertNotIn('swift.cache', resp.request.environ)
def test_GET_sharding_container_gap_in_shards_memcache(self):
# verify ordered listing even if unexpected gap between shard ranges;
# root is sharding so shard ranges are not cached
shard_bounds = (('', 'ham'), ('onion', 'pie'), ('rhubarb', ''))
shard_ranges = [
ShardRange('.shards_a/c_' + upper, Timestamp.now(), lower, upper)
for lower, upper in shard_bounds]
sr_dicts = [dict(sr) for sr in shard_ranges]
sr_objs = [self._make_shard_objects(sr) for sr in shard_ranges]
shard_resp_hdrs = [
{'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': len(sr_objs[i]),
'X-Container-Bytes-Used':
sum([obj['bytes'] for obj in sr_objs[i]]),
'X-Container-Meta-Flavour': 'flavour%d' % i,
'X-Backend-Storage-Policy-Index': 0}
for i in range(3)]
all_objects = []
for objects in sr_objs:
all_objects.extend(objects)
size_all_objects = sum([obj['bytes'] for obj in all_objects])
num_all_objects = len(all_objects)
limit = CONTAINER_LISTING_LIMIT
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharding',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Timestamp': '99',
'X-Container-Object-Count': num_all_objects,
'X-Container-Bytes-Used': size_all_objects,
'X-Container-Meta-Flavour': 'peach',
'X-Backend-Storage-Policy-Index': 0}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
mock_responses = [
# status, body, headers
(200, sr_dicts, root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(200, sr_objs[1], shard_resp_hdrs[1]),
(200, sr_objs[2], shard_resp_hdrs[2])
]
# NB marker always advances to last object name
        # NB end_markers are the upper bound of the shard range being queried
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='ham\x00', states='listing',
limit=str(limit))), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))), # 200
(shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='p', end_marker='', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))) # 200
]
resp = self._check_GET_shard_listing(
mock_responses, all_objects, expected_requests, memcache=True)
# root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs,
exp_sharding_state='sharding')
self.assertIn('swift.cache', resp.request.environ)
self.assertNotIn('shard-listing-v2/a/c',
resp.request.environ['swift.cache'].store)
def test_GET_sharded_container_gap_in_shards_memcache(self):
# verify ordered listing even if unexpected gap between shard ranges
shard_bounds = (('', 'ham'), ('onion', 'pie'), ('rhubarb', ''))
shard_ranges = [
ShardRange('.shards_a/c_' + upper, Timestamp.now(), lower, upper)
for lower, upper in shard_bounds]
sr_dicts = [dict(sr) for sr in shard_ranges]
sr_objs = [self._make_shard_objects(sr) for sr in shard_ranges]
shard_resp_hdrs = [
{'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': len(sr_objs[i]),
'X-Container-Bytes-Used':
sum([obj['bytes'] for obj in sr_objs[i]]),
'X-Container-Meta-Flavour': 'flavour%d' % i,
'X-Backend-Storage-Policy-Index': 0}
for i in range(3)]
all_objects = []
for objects in sr_objs:
all_objects.extend(objects)
size_all_objects = sum([obj['bytes'] for obj in all_objects])
num_all_objects = len(all_objects)
limit = CONTAINER_LISTING_LIMIT
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Timestamp': '99',
'X-Container-Object-Count': num_all_objects,
'X-Container-Bytes-Used': size_all_objects,
'X-Container-Meta-Flavour': 'peach',
'X-Backend-Storage-Policy-Index': 0}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
mock_responses = [
# status, body, headers
(200, sr_dicts, root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(200, sr_objs[1], shard_resp_hdrs[1]),
(200, sr_objs[2], shard_resp_hdrs[2])
]
# NB marker always advances to last object name
# NB compaction of shard range data to cached bounds loses the gaps, so
        # end_markers are the lower bound of the next available shard range
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='onion\x00', states='listing',
limit=str(limit))), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='h', end_marker='rhubarb\x00', states='listing',
limit=str(limit - len(sr_objs[0])))), # 200
(shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='p', end_marker='', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))) # 200
]
resp = self._check_GET_shard_listing(
mock_responses, all_objects, expected_requests, memcache=True)
# root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs)
self.assertIn('swift.cache', resp.request.environ)
self.assertIn('shard-listing-v2/a/c',
resp.request.environ['swift.cache'].store)
# NB compact bounds in cache do not reveal the gap in shard ranges
self.assertEqual(
[['', '.shards_a/c_ham'],
['onion', '.shards_a/c_pie'],
['rhubarb', '.shards_a/c_']],
resp.request.environ['swift.cache'].store['shard-listing-v2/a/c'])
def test_GET_sharded_container_empty_shard(self):
# verify ordered listing when a shard is empty
shard_bounds = (('', 'ham'), ('ham', 'pie'), ('pie', ''))
shard_ranges = [
ShardRange('.shards_a/c_%s' % upper, Timestamp.now(), lower, upper)
for lower, upper in shard_bounds]
sr_dicts = [dict(sr) for sr in shard_ranges]
sr_objs = [self._make_shard_objects(sr) for sr in shard_ranges]
shard_resp_hdrs = [
{'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': len(sr_objs[i]),
'X-Container-Bytes-Used':
sum([obj['bytes'] for obj in sr_objs[i]]),
'X-Container-Meta-Flavour': 'flavour%d' % i,
'X-Backend-Storage-Policy-Index': 0}
for i in range(3)]
empty_shard_resp_hdrs = {
'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': 0,
'X-Container-Bytes-Used': 0,
'X-Container-Meta-Flavour': 'flavour',
'X-Backend-Storage-Policy-Index': 0}
# empty first shard range
all_objects = sr_objs[1] + sr_objs[2]
size_all_objects = sum([obj['bytes'] for obj in all_objects])
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Timestamp': '99',
'X-Container-Object-Count': len(all_objects),
'X-Container-Bytes-Used': size_all_objects,
'X-Container-Meta-Flavour': 'peach',
'X-Backend-Storage-Policy-Index': 0}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
mock_responses = [
# status, body, headers
(200, sr_dicts, root_shard_resp_hdrs),
(200, [], empty_shard_resp_hdrs),
(200, sr_objs[1], shard_resp_hdrs[1]),
(200, sr_objs[2], shard_resp_hdrs[2])
]
# NB marker does not advance until an object is in the listing
limit = CONTAINER_LISTING_LIMIT
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='ham\x00', states='listing',
limit=str(limit))), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='pie\x00', states='listing',
limit=str(limit))), # 200
(shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='p', end_marker='', states='listing',
limit=str(limit - len(sr_objs[1])))) # 200
]
resp = self._check_GET_shard_listing(
mock_responses, sr_objs[1] + sr_objs[2], expected_requests)
self.check_response(resp, root_resp_hdrs)
# empty last shard range, reverse
all_objects = sr_objs[0] + sr_objs[1]
size_all_objects = sum([obj['bytes'] for obj in all_objects])
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Timestamp': '99',
'X-Container-Object-Count': len(all_objects),
'X-Container-Bytes-Used': size_all_objects,
'X-Container-Meta-Flavour': 'peach',
'X-Backend-Storage-Policy-Index': 0}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
mock_responses = [
# status, body, headers
(200, list(reversed(sr_dicts)), root_shard_resp_hdrs),
(200, [], empty_shard_resp_hdrs),
(200, list(reversed(sr_objs[1])), shard_resp_hdrs[1]),
(200, list(reversed(sr_objs[0])), shard_resp_hdrs[0]),
]
limit = CONTAINER_LISTING_LIMIT
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing', reverse='true')), # 200
(shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='pie', states='listing',
limit=str(limit), reverse='true')), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='ham', states='listing',
limit=str(limit), reverse='true')), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
dict(marker=sr_objs[1][0]['name'], end_marker='',
states='listing', reverse='true',
limit=str(limit - len(sr_objs[1])))) # 200
]
resp = self._check_GET_shard_listing(
mock_responses, list(reversed(sr_objs[0] + sr_objs[1])),
expected_requests, query_string='?reverse=true', reverse=True)
self.check_response(resp, root_resp_hdrs)
# empty second shard range
all_objects = sr_objs[0] + sr_objs[2]
size_all_objects = sum([obj['bytes'] for obj in all_objects])
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Timestamp': '99',
'X-Container-Object-Count': len(all_objects),
'X-Container-Bytes-Used': size_all_objects,
'X-Container-Meta-Flavour': 'peach',
'X-Backend-Storage-Policy-Index': 0}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
mock_responses = [
# status, body, headers
(200, sr_dicts, root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(200, [], empty_shard_resp_hdrs),
(200, sr_objs[2], shard_resp_hdrs[2])
]
# NB marker always advances to last object name
limit = CONTAINER_LISTING_LIMIT
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='ham\x00', states='listing',
limit=str(limit))), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))), # 200
(shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='h', end_marker='', states='listing',
limit=str(limit - len(sr_objs[0])))) # 200
]
resp = self._check_GET_shard_listing(
mock_responses, sr_objs[0] + sr_objs[2], expected_requests)
        # root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs)
# marker in empty second range
mock_responses = [
# status, body, headers
(200, sr_dicts[1:], root_shard_resp_hdrs),
(200, [], empty_shard_resp_hdrs),
(200, sr_objs[2], shard_resp_hdrs[2])
]
# NB marker unchanged when getting from third range
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing', marker='koolaid')), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='koolaid', end_marker='pie\x00', states='listing',
limit=str(limit))), # 200
(shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='koolaid', end_marker='', states='listing',
limit=str(limit))) # 200
]
resp = self._check_GET_shard_listing(
mock_responses, sr_objs[2], expected_requests,
query_string='?marker=koolaid')
        # root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs)
# marker in empty second range, reverse
mock_responses = [
# status, body, headers
(200, list(reversed(sr_dicts[:2])), root_shard_resp_hdrs),
(200, [], empty_shard_resp_hdrs),
            (200, list(reversed(sr_objs[0])), shard_resp_hdrs[0])
]
# NB marker unchanged when getting from first range
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing', marker='koolaid', reverse='true')), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='koolaid', end_marker='ham', reverse='true',
states='listing', limit=str(limit))), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='koolaid', end_marker='', reverse='true',
states='listing', limit=str(limit))) # 200
]
resp = self._check_GET_shard_listing(
mock_responses, list(reversed(sr_objs[0])), expected_requests,
query_string='?marker=koolaid&reverse=true', reverse=True)
        # root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs)
def _check_GET_sharded_container_shard_error(self, error):
        # verify that persistent errors from a shard abort the listing with 503
shard_bounds = (('', 'ham'), ('ham', 'pie'), ('lemon', ''))
shard_ranges = [
ShardRange('.shards_a/c_%s' % upper, Timestamp.now(), lower, upper)
for lower, upper in shard_bounds]
sr_dicts = [dict(sr) for sr in shard_ranges]
sr_objs = [self._make_shard_objects(sr) for sr in shard_ranges]
# empty second shard range
sr_objs[1] = []
shard_resp_hdrs = [
{'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': len(sr_objs[i]),
'X-Container-Bytes-Used':
sum([obj['bytes'] for obj in sr_objs[i]]),
'X-Container-Meta-Flavour': 'flavour%d' % i,
'X-Backend-Storage-Policy-Index': 0}
for i in range(3)]
all_objects = []
for objects in sr_objs:
all_objects.extend(objects)
size_all_objects = sum([obj['bytes'] for obj in all_objects])
num_all_objects = len(all_objects)
limit = CONTAINER_LISTING_LIMIT
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Timestamp': '99',
'X-Container-Object-Count': num_all_objects,
'X-Container-Bytes-Used': size_all_objects,
'X-Container-Meta-Flavour': 'peach',
'X-Backend-Storage-Policy-Index': 0}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
mock_responses = [
# status, body, headers
(200, sr_dicts, root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0])] + \
[(error, [], {})] * 2 * self.CONTAINER_REPLICAS
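        # the second shard returns an error from every backend attempt
        # (2 * CONTAINER_REPLICAS), so the proxy gives up on the listing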
# NB marker always advances to last object name
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='ham\x00', states='listing',
limit=str(limit)))] \
+ [(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0]))))
] * 2 * self.CONTAINER_REPLICAS
self._check_GET_shard_listing(
mock_responses, all_objects, expected_requests,
expected_status=503)
def test_GET_sharded_container_shard_errors(self):
self._check_GET_sharded_container_shard_error(404)
self._check_GET_sharded_container_shard_error(500)
def test_GET_sharded_container_sharding_shard(self):
# one shard is in process of sharding
shard_bounds = (('', 'ham'), ('ham', 'pie'), ('pie', ''))
shard_ranges = [
ShardRange('.shards_a/c_' + upper, Timestamp.now(), lower, upper)
for lower, upper in shard_bounds]
sr_dicts = [dict(sr) for sr in shard_ranges]
sr_objs = [self._make_shard_objects(sr) for sr in shard_ranges]
shard_resp_hdrs = [
{'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': len(sr_objs[i]),
'X-Container-Bytes-Used':
sum([obj['bytes'] for obj in sr_objs[i]]),
'X-Container-Meta-Flavour': 'flavour%d' % i,
'X-Backend-Storage-Policy-Index': 0}
for i in range(3)]
shard_1_shard_resp_hdrs = dict(shard_resp_hdrs[1])
shard_1_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
# second shard is sharding and has cleaved two out of three sub shards
shard_resp_hdrs[1]['X-Backend-Sharding-State'] = 'sharding'
sub_shard_bounds = (('ham', 'juice'), ('juice', 'lemon'))
sub_shard_ranges = [
ShardRange('a/c_sub_' + upper, Timestamp.now(), lower, upper)
for lower, upper in sub_shard_bounds]
sub_sr_dicts = [dict(sr) for sr in sub_shard_ranges]
sub_sr_objs = [self._make_shard_objects(sr) for sr in sub_shard_ranges]
sub_shard_resp_hdrs = [
{'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': len(sub_sr_objs[i]),
'X-Container-Bytes-Used':
sum([obj['bytes'] for obj in sub_sr_objs[i]]),
'X-Container-Meta-Flavour': 'flavour%d' % i,
'X-Backend-Storage-Policy-Index': 0}
for i in range(2)]
all_objects = []
for objects in sr_objs:
all_objects.extend(objects)
size_all_objects = sum([obj['bytes'] for obj in all_objects])
num_all_objects = len(all_objects)
limit = CONTAINER_LISTING_LIMIT
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Timestamp': '99',
'X-Container-Object-Count': num_all_objects,
'X-Container-Bytes-Used': size_all_objects,
'X-Container-Meta-Flavour': 'peach',
'X-Backend-Storage-Policy-Index': 0}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
mock_responses = [
# status, body, headers
(200, sr_dicts, root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(200, sub_sr_dicts + [sr_dicts[1]], shard_1_shard_resp_hdrs),
(200, sub_sr_objs[0], sub_shard_resp_hdrs[0]),
(200, sub_sr_objs[1], sub_shard_resp_hdrs[1]),
(200, sr_objs[1][len(sub_sr_objs[0] + sub_sr_objs[1]):],
shard_resp_hdrs[1]),
(200, sr_objs[2], shard_resp_hdrs[2])
]
# NB marker always advances to last object name
expected_requests = [
# get root shard ranges
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 200
# get first shard objects
(shard_ranges[0].name,
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='', end_marker='ham\x00', states='listing',
limit=str(limit))), # 200
# get second shard sub-shard ranges
(shard_ranges[1].name,
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))),
# get first sub-shard objects
(sub_shard_ranges[0].name,
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='h', end_marker='juice\x00', states='listing',
limit=str(limit - len(sr_objs[0])))),
# get second sub-shard objects
(sub_shard_ranges[1].name,
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='j', end_marker='lemon\x00', states='listing',
limit=str(limit - len(sr_objs[0] + sub_sr_objs[0])))),
            # get remainder of second shard objects
(shard_ranges[1].name,
{'X-Backend-Record-Type': 'object',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='l', end_marker='pie\x00',
limit=str(limit - len(sr_objs[0] + sub_sr_objs[0] +
sub_sr_objs[1])))), # 200
# get third shard objects
(shard_ranges[2].name,
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='p', end_marker='', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))) # 200
]
expected_objects = (
sr_objs[0] + sub_sr_objs[0] + sub_sr_objs[1] +
sr_objs[1][len(sub_sr_objs[0] + sub_sr_objs[1]):] + sr_objs[2])
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests)
        # root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs)
@patch_policies([
StoragePolicy(0, 'zero', True, object_ring=FakeRing()),
StoragePolicy(1, 'one', False, object_ring=FakeRing())
])
def test_GET_sharded_container_sharding_shard_mixed_policies(self):
# scenario: one shard is in process of sharding, shards have different
# policy than root, expect listing to always request root policy index
shard_bounds = (('', 'ham'), ('ham', 'pie'), ('pie', ''))
shard_ranges = [
ShardRange('.shards_a/c_' + upper, Timestamp.now(), lower, upper)
for lower, upper in shard_bounds]
sr_dicts = [dict(sr) for sr in shard_ranges]
sr_objs = [self._make_shard_objects(sr) for sr in shard_ranges]
shard_resp_hdrs = [
{'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': len(sr_objs[i]),
'X-Container-Bytes-Used':
sum([obj['bytes'] for obj in sr_objs[i]]),
'X-Container-Meta-Flavour': 'flavour%d' % i,
'X-Backend-Storage-Policy-Index': 1,
'X-Backend-Record-Storage-Policy-Index': 0}
for i in range(3)]
shard_1_shard_resp_hdrs = dict(shard_resp_hdrs[1])
shard_1_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
# second shard is sharding and has cleaved two out of three sub shards
shard_resp_hdrs[1]['X-Backend-Sharding-State'] = 'sharding'
sub_shard_bounds = (('ham', 'juice'), ('juice', 'lemon'))
sub_shard_ranges = [
ShardRange('a/c_sub_' + upper, Timestamp.now(), lower, upper)
for lower, upper in sub_shard_bounds]
sub_sr_dicts = [dict(sr) for sr in sub_shard_ranges]
sub_sr_objs = [self._make_shard_objects(sr) for sr in sub_shard_ranges]
sub_shard_resp_hdrs = [
{'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': len(sub_sr_objs[i]),
'X-Container-Bytes-Used':
sum([obj['bytes'] for obj in sub_sr_objs[i]]),
'X-Container-Meta-Flavour': 'flavour%d' % i,
'X-Backend-Storage-Policy-Index': 1,
'X-Backend-Record-Storage-Policy-Index': 0}
for i in range(2)]
all_objects = []
for objects in sr_objs:
all_objects.extend(objects)
size_all_objects = sum([obj['bytes'] for obj in all_objects])
num_all_objects = len(all_objects)
limit = CONTAINER_LISTING_LIMIT
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Timestamp': '99',
'X-Container-Object-Count': num_all_objects,
'X-Container-Bytes-Used': size_all_objects,
'X-Container-Meta-Flavour': 'peach',
'X-Backend-Storage-Policy-Index': 0}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
mock_responses = [
# status, body, headers
(200, sr_dicts, root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(200, sub_sr_dicts + [sr_dicts[1]], shard_1_shard_resp_hdrs),
(200, sub_sr_objs[0], sub_shard_resp_hdrs[0]),
(200, sub_sr_objs[1], sub_shard_resp_hdrs[1]),
(200, sr_objs[1][len(sub_sr_objs[0] + sub_sr_objs[1]):],
shard_resp_hdrs[1]),
(200, sr_objs[2], shard_resp_hdrs[2])
]
# NB marker always advances to last object name
expected_requests = [
# get root shard ranges
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 200
# get first shard objects
(shard_ranges[0].name,
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='', end_marker='ham\x00', states='listing',
limit=str(limit))), # 200
# get second shard sub-shard ranges
(shard_ranges[1].name,
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))),
# get first sub-shard objects
(sub_shard_ranges[0].name,
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='h', end_marker='juice\x00', states='listing',
limit=str(limit - len(sr_objs[0])))),
# get second sub-shard objects
(sub_shard_ranges[1].name,
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='j', end_marker='lemon\x00', states='listing',
limit=str(limit - len(sr_objs[0] + sub_sr_objs[0])))),
# get remainder of second shard objects
(shard_ranges[1].name,
{'X-Backend-Record-Type': 'object',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='l', end_marker='pie\x00',
limit=str(limit - len(sr_objs[0] + sub_sr_objs[0] +
sub_sr_objs[1])))), # 200
# get third shard objects
(shard_ranges[2].name,
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '0'},
dict(marker='p', end_marker='', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))) # 200
]
expected_objects = (
sr_objs[0] + sub_sr_objs[0] + sub_sr_objs[1] +
sr_objs[1][len(sub_sr_objs[0] + sub_sr_objs[1]):] + sr_objs[2])
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests)
        # root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs)
@patch_policies([
StoragePolicy(0, 'zero', True, object_ring=FakeRing()),
StoragePolicy(1, 'one', False, object_ring=FakeRing())
])
def test_GET_sharded_container_mixed_policies_error(self):
# scenario: shards have different policy than root, listing requests
# root policy index but shards not upgraded and respond with their own
# policy index
def do_test(shard_policy):
# only need first shard for this test...
sr = ShardRange('.shards_a/c_pie', Timestamp.now(), '', 'pie')
sr_objs = self._make_shard_objects(sr)
shard_resp_hdrs = {
'X-Backend-Sharding-State': 'unsharded',
'X-Container-Object-Count': len(sr_objs),
'X-Container-Bytes-Used':
sum([obj['bytes'] for obj in sr_objs]),
}
if shard_policy is not None:
shard_resp_hdrs['X-Backend-Storage-Policy-Index'] = \
shard_policy
size_all_objects = sum([obj['bytes'] for obj in sr_objs])
num_all_objects = len(sr_objs)
limit = CONTAINER_LISTING_LIMIT
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
'X-Backend-Timestamp': '99',
'X-Container-Object-Count': num_all_objects,
'X-Container-Bytes-Used': size_all_objects,
'X-Container-Meta-Flavour': 'peach',
                              # NB root policy 1 differs from shard policy
'X-Backend-Storage-Policy-Index': 1}
root_shard_resp_hdrs = dict(root_resp_hdrs)
root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
mock_responses = [
# status, body, headers
(200, [dict(sr)], root_shard_resp_hdrs),
(200, sr_objs, shard_resp_hdrs),
]
# NB marker always advances to last object name
expected_requests = [
# get root shard ranges
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 200
# get first shard objects
(sr.name,
{'X-Backend-Record-Type': 'auto',
'X-Backend-Storage-Policy-Index': '1'},
dict(marker='', end_marker='pie\x00', states='listing',
limit=str(limit))), # 200
# error to client; no request for second shard objects
]
self._check_GET_shard_listing(
mock_responses, [], expected_requests,
expected_status=503)
do_test(0)
do_test(None)
def _build_request(self, headers, params, infocache=None):
# helper to make a GET request with caches set in environ
query_string = '?' + '&'.join('%s=%s' % (k, v)
for k, v in params.items())
container_path = '/v1/a/c' + query_string
request = Request.blank(container_path, headers=headers)
request.environ['swift.cache'] = self.memcache
request.environ['swift.infocache'] = infocache if infocache else {}
return request
def _check_response(self, resp, exp_shards, extra_hdrs):
# helper to check a shard listing response
actual_shards = json.loads(resp.body)
self.assertEqual(exp_shards, actual_shards)
exp_hdrs = dict(self.root_resp_hdrs)
# x-put-timestamp is sent from backend but removed in proxy base
# controller GETorHEAD_base so not expected in response from proxy
exp_hdrs.pop('X-Put-Timestamp')
self.assertIn('X-Timestamp', resp.headers)
actual_timestamp = resp.headers.pop('X-Timestamp')
exp_timestamp = exp_hdrs.pop('X-Timestamp')
self.assertEqual(Timestamp(exp_timestamp),
Timestamp(actual_timestamp))
exp_hdrs.update(extra_hdrs)
exp_hdrs.update(
{'X-Storage-Policy': 'zero', # added in container controller
'Content-Length':
str(len(json.dumps(exp_shards).encode('ascii'))),
}
)
# we expect this header to be removed by proxy
exp_hdrs.pop('X-Backend-Override-Shard-Name-Filter', None)
for ignored in ('x-account-container-count', 'x-object-meta-test',
'x-delete-at', 'etag', 'x-works'):
# FakeConn adds these
resp.headers.pop(ignored, None)
self.assertEqual(exp_hdrs, resp.headers)
def _capture_backend_request(self, req, resp_status, resp_body,
resp_extra_hdrs, num_resp=1):
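        # helper to execute req against the app with num_resp identical
        # stubbed backend responses; returns the first captured backend
        # request and the client response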
self.assertGreater(num_resp, 0) # sanity check
resp_hdrs = dict(self.root_resp_hdrs)
resp_hdrs.update(resp_extra_hdrs)
resp_status = [resp_status] * num_resp
with mocked_http_conn(
*resp_status, body_iter=[resp_body] * num_resp,
headers=[resp_hdrs] * num_resp) as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp_status[0], resp.status_int)
self.assertEqual(num_resp, len(fake_conn.requests))
return fake_conn.requests[0], resp
def _check_backend_req(self, req, backend_req, extra_params=None,
extra_hdrs=None):
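        # helper to assert the path, query params and headers of a captured
        # backend request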
self.assertEqual('a/c', backend_req['path'][7:])
expected_params = {'states': 'listing', 'format': 'json'}
if extra_params:
expected_params.update(extra_params)
if six.PY2:
backend_params = dict(urllib.parse.parse_qsl(
backend_req['qs'], True))
else:
backend_params = dict(urllib.parse.parse_qsl(
backend_req['qs'], True, encoding='latin1'))
self.assertEqual(expected_params, backend_params)
backend_hdrs = backend_req['headers']
self.assertIsNotNone(backend_hdrs.pop('Referer', None))
self.assertIsNotNone(backend_hdrs.pop('X-Timestamp', None))
self.assertTrue(backend_hdrs.pop('User-Agent', '').startswith(
'proxy-server'))
expected_headers = {
'Connection': 'close',
'Host': 'localhost:80',
'X-Trans-Id': req.headers['X-Trans-Id']}
if extra_hdrs:
expected_headers.update(extra_hdrs)
self.assertEqual(expected_headers, backend_hdrs)
for k, v in expected_headers.items():
self.assertIn(k, backend_hdrs)
self.assertEqual(v, backend_hdrs.get(k))
def _setup_shard_range_stubs(self):
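        # helper to stub shard range data and root container response
        # headers shared by the shard range GET and caching tests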
self.memcache = FakeMemcache()
shard_bounds = (('', 'ham'), ('ham', 'pie'), ('pie', ''))
self.ns_dicts = [{'name': '.shards_a/c_%s' % upper,
'lower': lower,
'upper': upper}
for lower, upper in shard_bounds]
self.namespaces = [Namespace(**ns) for ns in self.ns_dicts]
self.ns_bound_list = NamespaceBoundList.parse(self.namespaces)
self.sr_dicts = [dict(ShardRange(timestamp=Timestamp.now(), **ns))
for ns in self.ns_dicts]
self._stub_shards_dump = json.dumps(self.sr_dicts).encode('ascii')
self.root_resp_hdrs = {
'Accept-Ranges': 'bytes',
'Content-Type': 'application/json',
'Last-Modified': 'Thu, 01 Jan 1970 00:00:03 GMT',
'X-Backend-Timestamp': '2',
'X-Backend-Put-Timestamp': '3',
'X-Backend-Delete-Timestamp': '0',
'X-Backend-Status-Changed-At': '0',
'X-Timestamp': '2',
'X-Put-Timestamp': '3',
'X-Container-Object-Count': '6',
'X-Container-Bytes-Used': '12',
'X-Backend-Storage-Policy-Index': '0'}
def _do_test_caching(self, record_type, exp_recheck_listing):
# this test gets shard ranges into cache and then reads from cache
sharding_state = 'sharded'
self.memcache.delete_all()
# container is sharded but proxy does not have that state cached;
# expect a backend request and expect shard ranges to be cached
self.memcache.clear_calls()
self.logger.clear()
req = self._build_request({'X-Backend-Record-Type': record_type},
{'states': 'listing'}, {})
backend_req, resp = self._capture_backend_request(
req, 200, self._stub_shards_dump,
{'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state,
'X-Backend-Override-Shard-Name-Filter': 'true'})
self._check_backend_req(
req, backend_req,
extra_hdrs={'X-Backend-Record-Type': record_type,
'X-Backend-Override-Shard-Name-Filter': 'sharded'})
self._check_response(resp, self.ns_dicts, {
'X-Backend-Recheck-Container-Existence': '60',
'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state})
cache_key = 'shard-listing-v2/a/c'
self.assertEqual(
[mock.call.get('container/a/c'),
mock.call.set(cache_key, self.ns_bound_list.bounds,
time=exp_recheck_listing),
mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
self.assertEqual(sharding_state,
self.memcache.calls[2][1][1]['sharding_state'])
self.assertIn('swift.infocache', req.environ)
self.assertIn(cache_key, req.environ['swift.infocache'])
self.assertEqual(self.ns_bound_list,
req.environ['swift.infocache'][cache_key])
self.assertEqual(
[x[0][0] for x in
self.logger.logger.statsd_client.calls['increment']],
['container.info.cache.miss',
'container.shard_listing.cache.bypass.200'])
# container is sharded and proxy has that state cached, but
# no shard ranges cached; expect a cache miss and write-back
self.memcache.delete(cache_key)
self.memcache.clear_calls()
self.logger.clear()
req = self._build_request({'X-Backend-Record-Type': record_type},
{'states': 'listing'}, {})
backend_req, resp = self._capture_backend_request(
req, 200, self._stub_shards_dump,
{'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state,
'X-Backend-Override-Shard-Name-Filter': 'true'})
self._check_backend_req(
req, backend_req,
extra_hdrs={'X-Backend-Record-Type': record_type,
'X-Backend-Override-Shard-Name-Filter': 'sharded'})
self._check_response(resp, self.ns_dicts, {
'X-Backend-Recheck-Container-Existence': '60',
'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state})
self.assertEqual(
[mock.call.get('container/a/c'),
mock.call.get(cache_key, raise_on_error=True),
mock.call.set(cache_key, self.ns_bound_list.bounds,
time=exp_recheck_listing),
# Since there was a backend request, we go ahead and cache
# container info, too
mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
self.assertIn('swift.infocache', req.environ)
self.assertIn(cache_key, req.environ['swift.infocache'])
self.assertEqual(self.ns_bound_list,
req.environ['swift.infocache'][cache_key])
self.assertEqual(
[x[0][0] for x in
self.logger.logger.statsd_client.calls['increment']],
['container.info.cache.hit',
'container.shard_listing.cache.miss.200'])
# container is sharded and proxy does have that state cached and
# also has shard ranges cached; expect a read from cache
self.memcache.clear_calls()
self.logger.clear()
req = self._build_request({'X-Backend-Record-Type': record_type},
{'states': 'listing'}, {})
resp = req.get_response(self.app)
self._check_response(resp, self.ns_dicts, {
'X-Backend-Cached-Results': 'true',
'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state})
self.assertEqual(
[mock.call.get('container/a/c'),
mock.call.get(cache_key, raise_on_error=True)],
self.memcache.calls)
self.assertIn('swift.infocache', req.environ)
self.assertIn(cache_key, req.environ['swift.infocache'])
self.assertEqual(self.ns_bound_list,
req.environ['swift.infocache'][cache_key])
self.assertEqual(
[x[0][0] for x in
self.logger.logger.statsd_client.calls['increment']],
['container.info.cache.hit',
'container.shard_listing.cache.hit'])
# if there's a chance to skip cache, maybe we go to disk again...
self.memcache.clear_calls()
self.logger.clear()
self.app.container_listing_shard_ranges_skip_cache = 0.10
req = self._build_request({'X-Backend-Record-Type': record_type},
{'states': 'listing'}, {})
with mock.patch('random.random', return_value=0.05):
backend_req, resp = self._capture_backend_request(
req, 200, self._stub_shards_dump,
{'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state,
'X-Backend-Override-Shard-Name-Filter': 'true'})
self._check_backend_req(
req, backend_req,
extra_hdrs={'X-Backend-Record-Type': record_type,
'X-Backend-Override-Shard-Name-Filter': 'sharded'})
self._check_response(resp, self.ns_dicts, {
'X-Backend-Recheck-Container-Existence': '60',
'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state})
self.assertEqual(
[mock.call.get('container/a/c'),
mock.call.set(cache_key, self.ns_bound_list.bounds,
time=exp_recheck_listing),
# Since there was a backend request, we go ahead and cache
# container info, too
mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
self.assertIn('swift.infocache', req.environ)
self.assertIn(cache_key, req.environ['swift.infocache'])
self.assertEqual(self.ns_bound_list,
req.environ['swift.infocache'][cache_key])
self.assertEqual(
[x[0][0] for x in
self.logger.logger.statsd_client.calls['increment']],
['container.info.cache.hit',
'container.shard_listing.cache.skip.200'])
# ... or maybe we serve from cache
self.memcache.clear_calls()
self.logger.clear()
req = self._build_request({'X-Backend-Record-Type': record_type},
{'states': 'listing'}, {})
with mock.patch('random.random', return_value=0.11):
resp = req.get_response(self.app)
self._check_response(resp, self.ns_dicts, {
'X-Backend-Cached-Results': 'true',
'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state})
self.assertEqual(
[mock.call.get('container/a/c'),
mock.call.get(cache_key, raise_on_error=True)],
self.memcache.calls)
self.assertIn('swift.infocache', req.environ)
self.assertIn(cache_key, req.environ['swift.infocache'])
self.assertEqual(self.ns_bound_list,
req.environ['swift.infocache'][cache_key])
self.assertEqual(
[x[0][0] for x in
self.logger.logger.statsd_client.calls['increment']],
['container.info.cache.hit',
'container.shard_listing.cache.hit'])
# test request to hit infocache.
self.memcache.clear_calls()
self.logger.clear()
req = self._build_request(
{'X-Backend-Record-Type': record_type},
{'states': 'listing'},
infocache=req.environ['swift.infocache'])
with mock.patch('random.random', return_value=0.11):
resp = req.get_response(self.app)
self._check_response(resp, self.ns_dicts, {
'X-Backend-Cached-Results': 'true',
'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state})
self.assertEqual([], self.memcache.calls)
self.assertIn('swift.infocache', req.environ)
self.assertIn(cache_key, req.environ['swift.infocache'])
self.assertEqual(self.ns_bound_list,
req.environ['swift.infocache'][cache_key])
self.assertEqual(
[x[0][0] for x in
self.logger.logger.statsd_client.calls['increment']],
['container.info.infocache.hit',
'container.shard_listing.infocache.hit'])
# put this back the way we found it for later subtests
self.app.container_listing_shard_ranges_skip_cache = 0.0
# delete the container; check that shard ranges are evicted from cache
self.memcache.clear_calls()
infocache = {}
req = Request.blank('/v1/a/c', method='DELETE')
req.environ['swift.cache'] = self.memcache
req.environ['swift.infocache'] = infocache
self._capture_backend_request(req, 204, b'', {},
num_resp=self.CONTAINER_REPLICAS)
self.assertEqual(
[mock.call.delete('container/a/c'),
mock.call.delete(cache_key)],
self.memcache.calls)
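    # verify that the root container's storage policy index is propagated
    # to the subrequests used to gather objects from shards, except when the
    # request is itself a shard listing; an index already present on the
    # request is not overridden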
def test_get_from_shards_add_root_spi(self):
self._setup_shard_range_stubs()
shard_resp = mock.MagicMock(status_int=204, headers={})
def mock_get_container_listing(self_, req, *args, **kargs):
captured_hdrs.update(req.headers)
return None, shard_resp
# header in response -> header added to request
captured_hdrs = {}
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = mock.MagicMock(body=self._stub_shards_dump,
headers=self.root_resp_hdrs,
request=req)
resp.headers['X-Backend-Storage-Policy-Index'] = '0'
with mock.patch('swift.proxy.controllers.container.'
'ContainerController._get_container_listing',
mock_get_container_listing):
controller_cls, d = self.app.get_controller(req)
controller = controller_cls(self.app, **d)
controller._get_from_shards(req, resp)
self.assertIn('X-Backend-Storage-Policy-Index', captured_hdrs)
self.assertEqual(
captured_hdrs['X-Backend-Storage-Policy-Index'], '0')
captured_hdrs = {}
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = mock.MagicMock(body=self._stub_shards_dump,
headers=self.root_resp_hdrs,
request=req)
resp.headers['X-Backend-Storage-Policy-Index'] = '1'
with mock.patch('swift.proxy.controllers.container.'
'ContainerController._get_container_listing',
mock_get_container_listing):
controller_cls, d = self.app.get_controller(req)
controller = controller_cls(self.app, **d)
controller._get_from_shards(req, resp)
self.assertIn('X-Backend-Storage-Policy-Index', captured_hdrs)
self.assertEqual(
captured_hdrs['X-Backend-Storage-Policy-Index'], '1')
# header not added to request if not root request
captured_hdrs = {}
req = Request.blank('/v1/a/c',
environ={
'REQUEST_METHOD': 'GET',
'swift.shard_listing_history': [('a', 'c')]}
)
resp = mock.MagicMock(body=self._stub_shards_dump,
headers=self.root_resp_hdrs,
request=req)
resp.headers['X-Backend-Storage-Policy-Index'] = '0'
with mock.patch('swift.proxy.controllers.container.'
'ContainerController._get_container_listing',
mock_get_container_listing):
controller_cls, d = self.app.get_controller(req)
controller = controller_cls(self.app, **d)
controller._get_from_shards(req, resp)
self.assertNotIn('X-Backend-Storage-Policy-Index', captured_hdrs)
# existing X-Backend-Storage-Policy-Index in request is respected
captured_hdrs = {}
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'GET'})
req.headers['X-Backend-Storage-Policy-Index'] = '0'
resp = mock.MagicMock(body=self._stub_shards_dump,
headers=self.root_resp_hdrs,
request=req)
resp.headers['X-Backend-Storage-Policy-Index'] = '1'
with mock.patch('swift.proxy.controllers.container.'
'ContainerController._get_container_listing',
mock_get_container_listing):
controller_cls, d = self.app.get_controller(req)
controller = controller_cls(self.app, **d)
controller._get_from_shards(req, resp)
self.assertIn('X-Backend-Storage-Policy-Index', captured_hdrs)
self.assertEqual(
captured_hdrs['X-Backend-Storage-Policy-Index'], '0')
def test_GET_shard_ranges(self):
self._setup_shard_range_stubs()
# expect shard ranges cache time to be default value of 600
self._do_test_caching('shard', 600)
# expect shard ranges cache time to be configured value of 120
self.app.recheck_listing_shard_ranges = 120
self._do_test_caching('shard', 120)
def mock_get_from_shards(self, req, resp):
# for the purposes of these tests we override _get_from_shards so
# that the response contains the shard listing even though the
# record_type is 'auto'; these tests are verifying the content and
# caching of the backend shard range response so we're not
            # interested in gathering objects from the shards
return resp
with mock.patch('swift.proxy.controllers.container.'
'ContainerController._get_from_shards',
mock_get_from_shards):
self.app.recheck_listing_shard_ranges = 600
self._do_test_caching('auto', 600)
def test_GET_shard_ranges_404_response(self):
# pre-warm cache with container info but not shard ranges so that the
# backend request tries to get a cacheable listing, but backend 404's
self._setup_shard_range_stubs()
self.memcache.delete_all()
info = headers_to_container_info(self.root_resp_hdrs)
info['status'] = 200
info['sharding_state'] = 'sharded'
self.memcache.set('container/a/c', info)
self.memcache.clear_calls()
req = self._build_request({'X-Backend-Record-Type': 'shard'},
{'states': 'listing'}, {})
backend_req, resp = self._capture_backend_request(
req, 404, b'', {}, num_resp=2 * self.CONTAINER_REPLICAS)
self._check_backend_req(
req, backend_req,
extra_hdrs={'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'sharded'})
self.assertNotIn('X-Backend-Cached-Results', resp.headers)
# Note: container metadata is updated in cache but shard ranges are not
# deleted from cache
self.assertEqual(
[mock.call.get('container/a/c'),
mock.call.get('shard-listing-v2/a/c', raise_on_error=True),
mock.call.set('container/a/c', mock.ANY, time=6.0)],
self.memcache.calls)
self.assertEqual(404, self.memcache.calls[2][1][1]['status'])
self.assertEqual(b'', resp.body)
self.assertEqual(404, resp.status_int)
self.assertEqual({'container.info.cache.hit': 1,
'container.shard_listing.cache.miss.404': 1},
self.logger.statsd_client.get_increment_counts())
def test_GET_shard_ranges_read_from_cache_error(self):
self._setup_shard_range_stubs()
self.memcache = FakeMemcache()
self.memcache.delete_all()
self.logger.clear()
info = headers_to_container_info(self.root_resp_hdrs)
info['status'] = 200
info['sharding_state'] = 'sharded'
self.memcache.set('container/a/c', info)
self.memcache.clear_calls()
self.memcache.error_on_get = [False, True]
req = self._build_request({'X-Backend-Record-Type': 'shard'},
{'states': 'listing'}, {})
backend_req, resp = self._capture_backend_request(
req, 404, b'', {}, num_resp=2 * self.CONTAINER_REPLICAS)
self._check_backend_req(
req, backend_req,
extra_hdrs={'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'sharded'})
self.assertNotIn('X-Backend-Cached-Results', resp.headers)
self.assertEqual(
[mock.call.get('container/a/c'),
mock.call.get('shard-listing-v2/a/c', raise_on_error=True),
mock.call.set('container/a/c', mock.ANY, time=6.0)],
self.memcache.calls)
self.assertEqual(404, self.memcache.calls[2][1][1]['status'])
self.assertEqual(b'', resp.body)
self.assertEqual(404, resp.status_int)
self.assertEqual({'container.info.cache.hit': 1,
'container.shard_listing.cache.error.404': 1},
self.logger.statsd_client.get_increment_counts())
def _do_test_GET_shard_ranges_read_from_cache(self, params, record_type):
# pre-warm cache with container metadata and shard ranges and verify
        # that shard range listings are read from cache when appropriate
self.memcache.delete_all()
self.logger.clear()
info = headers_to_container_info(self.root_resp_hdrs)
info['status'] = 200
info['sharding_state'] = 'sharded'
self.memcache.set('container/a/c', info)
self.memcache.set('shard-listing-v2/a/c', self.ns_bound_list.bounds)
self.memcache.clear_calls()
req_hdrs = {'X-Backend-Record-Type': record_type}
req = self._build_request(req_hdrs, params, {})
resp = req.get_response(self.app)
self.assertEqual(
[mock.call.get('container/a/c'),
mock.call.get('shard-listing-v2/a/c', raise_on_error=True)],
self.memcache.calls)
self.assertEqual({'container.info.cache.hit': 1,
'container.shard_listing.cache.hit': 1},
self.logger.statsd_client.get_increment_counts())
return resp
def test_GET_shard_ranges_read_from_cache(self):
self._setup_shard_range_stubs()
exp_hdrs = {'X-Backend-Cached-Results': 'true',
'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Sharding-State': 'sharded'}
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing'}, 'shard')
self._check_response(resp, self.ns_dicts, exp_hdrs)
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'reverse': 'true'}, 'shard')
exp_shards = list(self.ns_dicts)
exp_shards.reverse()
self._check_response(resp, exp_shards, exp_hdrs)
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'marker': 'jam'}, 'shard')
self._check_response(resp, self.ns_dicts[1:], exp_hdrs)
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'marker': 'jam', 'end_marker': 'kale'},
'shard')
self._check_response(resp, self.ns_dicts[1:2], exp_hdrs)
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'includes': 'egg'}, 'shard')
self._check_response(resp, self.ns_dicts[:1], exp_hdrs)
# override _get_from_shards so that the response contains the shard
# listing that we want to verify even though the record_type is 'auto'
def mock_get_from_shards(self, req, resp):
return resp
with mock.patch('swift.proxy.controllers.container.'
'ContainerController._get_from_shards',
mock_get_from_shards):
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'reverse': 'true'}, 'auto')
exp_shards = list(self.ns_dicts)
exp_shards.reverse()
self._check_response(resp, exp_shards, exp_hdrs)
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'marker': 'jam'}, 'auto')
self._check_response(resp, self.ns_dicts[1:], exp_hdrs)
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'marker': 'jam', 'end_marker': 'kale'},
'auto')
self._check_response(resp, self.ns_dicts[1:2], exp_hdrs)
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'includes': 'egg'}, 'auto')
self._check_response(resp, self.ns_dicts[:1], exp_hdrs)
def _do_test_GET_shard_ranges_write_to_cache(self, params, record_type):
        # verify that shard range listings are written to cache when
        # appropriate
self.logger.clear()
self.memcache.delete_all()
self.memcache.clear_calls()
# set request up for cacheable listing
req_hdrs = {'X-Backend-Record-Type': record_type}
req = self._build_request(req_hdrs, params, {})
# response indicates cacheable listing
resp_hdrs = {'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Sharding-State': 'sharded'}
backend_req, resp = self._capture_backend_request(
req, 200, self._stub_shards_dump, resp_hdrs)
self._check_backend_req(
req, backend_req,
extra_params=params,
extra_hdrs={'X-Backend-Record-Type': record_type,
'X-Backend-Override-Shard-Name-Filter': 'sharded'})
expected_hdrs = {'X-Backend-Recheck-Container-Existence': '60'}
expected_hdrs.update(resp_hdrs)
self.assertEqual(
[mock.call.get('container/a/c'),
mock.call.set(
'shard-listing-v2/a/c', self.ns_bound_list.bounds, time=600),
mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
info_lines = self.logger.get_lines_for_level('info')
self.assertIn(
'Caching listing shards for shard-listing-v2/a/c (3 shards)',
info_lines)
# shards were cached
self.assertEqual('sharded',
self.memcache.calls[2][1][1]['sharding_state'])
self.assertEqual({'container.info.cache.miss': 1,
'container.shard_listing.cache.bypass.200': 1},
self.logger.statsd_client.get_increment_counts())
return resp
def test_GET_shard_ranges_write_to_cache(self):
self._setup_shard_range_stubs()
exp_hdrs = {'X-Backend-Recheck-Container-Existence': '60',
'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Sharding-State': 'sharded'}
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing'}, 'shard')
self._check_response(resp, self.ns_dicts, exp_hdrs)
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'reverse': 'true'}, 'shard')
exp_shards = list(self.ns_dicts)
exp_shards.reverse()
self._check_response(resp, exp_shards, exp_hdrs)
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'marker': 'jam'}, 'shard')
self._check_response(resp, self.ns_dicts[1:], exp_hdrs)
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'marker': 'jam', 'end_marker': 'kale'},
'shard')
self._check_response(resp, self.ns_dicts[1:2], exp_hdrs)
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'includes': 'egg'}, 'shard')
self._check_response(resp, self.ns_dicts[:1], exp_hdrs)
# override _get_from_shards so that the response contains the shard
# listing that we want to verify even though the record_type is 'auto'
def mock_get_from_shards(self, req, resp):
return resp
with mock.patch('swift.proxy.controllers.container.'
'ContainerController._get_from_shards',
mock_get_from_shards):
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'reverse': 'true'}, 'auto')
exp_shards = list(self.ns_dicts)
exp_shards.reverse()
self._check_response(resp, exp_shards, exp_hdrs)
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'marker': 'jam'}, 'auto')
self._check_response(resp, self.ns_dicts[1:], exp_hdrs)
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'marker': 'jam', 'end_marker': 'kale'},
'auto')
self._check_response(resp, self.ns_dicts[1:2], exp_hdrs)
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'includes': 'egg'}, 'auto')
self._check_response(resp, self.ns_dicts[:1], exp_hdrs)
def test_GET_shard_ranges_write_to_cache_with_x_newest(self):
        # when x-newest is sent, verify that there is no cache lookup to check
        # sharding state, but backend requests are still made requesting the
        # complete shard list, which can then be cached
self._setup_shard_range_stubs()
self.memcache.delete_all()
self.memcache.clear_calls()
req_hdrs = {'X-Backend-Record-Type': 'shard',
'X-Newest': 'true'}
params = {'states': 'listing'}
req = self._build_request(req_hdrs, params, {})
resp_hdrs = {'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Sharding-State': 'sharded'}
backend_req, resp = self._capture_backend_request(
req, 200, self._stub_shards_dump, resp_hdrs,
num_resp=2 * self.CONTAINER_REPLICAS)
self._check_backend_req(
req, backend_req,
extra_hdrs={'X-Backend-Record-Type': 'shard',
'X-Newest': 'true',
'X-Backend-Override-Shard-Name-Filter': 'sharded'})
expected_hdrs = {'X-Backend-Recheck-Container-Existence': '60'}
expected_hdrs.update(resp_hdrs)
self._check_response(resp, self.ns_dicts, expected_hdrs)
self.assertEqual(
[mock.call.get('container/a/c'),
mock.call.set(
'shard-listing-v2/a/c', self.ns_bound_list.bounds, time=600),
mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
self.assertEqual('sharded',
self.memcache.calls[2][1][1]['sharding_state'])
self.assertEqual({'container.info.cache.miss': 1,
'container.shard_listing.cache.force_skip.200': 1},
self.logger.statsd_client.get_increment_counts())
def _do_test_GET_shard_ranges_no_cache_write(self, resp_hdrs):
        # verify that there is a cache lookup to check container info, but a
        # backend request is then made requesting the complete shard list; do
        # not expect shard ranges to be cached, and check that marker,
        # end_marker etc are passed to the backend
self.logger.clear()
self.memcache.clear_calls()
req = self._build_request(
{'X-Backend-Record-Type': 'shard'},
{'states': 'listing', 'marker': 'egg', 'end_marker': 'jam',
'reverse': 'true'}, {})
resp_shards = self.sr_dicts[:2]
resp_shards.reverse()
backend_req, resp = self._capture_backend_request(
req, 200, json.dumps(resp_shards).encode('ascii'),
resp_hdrs)
self._check_backend_req(
req, backend_req,
extra_params={'marker': 'egg', 'end_marker': 'jam',
'reverse': 'true'},
extra_hdrs={'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'sharded'})
expected_shards = self.sr_dicts[:2]
expected_shards.reverse()
expected_hdrs = {'X-Backend-Recheck-Container-Existence': '60'}
expected_hdrs.update(resp_hdrs)
self._check_response(resp, expected_shards, expected_hdrs)
# container metadata is looked up in memcache for sharding state
# container metadata is set in memcache
self.assertEqual(
[mock.call.get('container/a/c'),
mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
self.assertEqual(resp.headers.get('X-Backend-Sharding-State'),
self.memcache.calls[1][1][1]['sharding_state'])
self.memcache.delete_all()
def test_GET_shard_ranges_no_cache_write_with_cached_container_info(self):
# pre-warm cache with container info, but verify that shard range cache
# lookup is only attempted when the cached sharding state and status
        # are suitable, and a full set of headers can be constructed from
        # cache; Note: backend response has state unsharded so no shard
        # ranges are cached
self._setup_shard_range_stubs()
def do_test(info):
self._setup_shard_range_stubs()
self.memcache.set('container/a/c', info)
# expect the same outcomes as if there was no cached container info
self._do_test_GET_shard_ranges_no_cache_write(
{'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Sharding-State': 'unsharded'})
# setup a default 'good' info
info = headers_to_container_info(self.root_resp_hdrs)
info['status'] = 200
info['sharding_state'] = 'sharded'
do_test(dict(info, status=404))
do_test(dict(info, sharding_state='unsharded'))
do_test(dict(info, sharding_state='sharding'))
do_test(dict(info, sharding_state='collapsed'))
do_test(dict(info, sharding_state='unexpected'))
stale_info = dict(info)
stale_info.pop('created_at')
do_test(stale_info)
stale_info = dict(info)
stale_info.pop('put_timestamp')
do_test(stale_info)
stale_info = dict(info)
stale_info.pop('delete_timestamp')
do_test(stale_info)
stale_info = dict(info)
stale_info.pop('status_changed_at')
do_test(stale_info)
def test_GET_shard_ranges_no_cache_write_for_non_sharded_states(self):
# verify that shard ranges are not written to cache when container
# state returned by backend is not 'sharded'; we don't expect
# 'X-Backend-Override-Shard-Name-Filter': 'true' to be returned unless
# the sharding state is 'sharded' but include it in this test to check
        # that the state is checked by the proxy controller
self._setup_shard_range_stubs()
self._do_test_GET_shard_ranges_no_cache_write(
{'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Sharding-State': 'unsharded'})
self._do_test_GET_shard_ranges_no_cache_write(
{'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Sharding-State': 'sharding'})
self._do_test_GET_shard_ranges_no_cache_write(
{'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Sharding-State': 'collapsed'})
self._do_test_GET_shard_ranges_no_cache_write(
{'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Sharding-State': 'unexpected'})
def test_GET_shard_ranges_no_cache_write_for_incomplete_listing(self):
# verify that shard ranges are not written to cache when container
# response does not acknowledge x-backend-override-shard-name-filter
# e.g. container server not upgraded
self._setup_shard_range_stubs()
self._do_test_GET_shard_ranges_no_cache_write(
{'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': 'sharded'})
self._do_test_GET_shard_ranges_no_cache_write(
{'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'false',
'X-Backend-Sharding-State': 'sharded'})
self._do_test_GET_shard_ranges_no_cache_write(
{'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'rogue',
'X-Backend-Sharding-State': 'sharded'})
def test_GET_shard_ranges_no_cache_write_for_object_listing(self):
# verify that shard ranges are not written to cache when container
# response does not return shard ranges
self._setup_shard_range_stubs()
self._do_test_GET_shard_ranges_no_cache_write(
{'X-Backend-Record-Type': 'object',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Sharding-State': 'sharded'})
self._do_test_GET_shard_ranges_no_cache_write(
{'X-Backend-Record-Type': 'other',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Sharding-State': 'sharded'})
self._do_test_GET_shard_ranges_no_cache_write(
{'X-Backend-Record-Type': 'true',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Sharding-State': 'sharded'})
self._do_test_GET_shard_ranges_no_cache_write(
{'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Sharding-State': 'sharded'})
def _do_test_GET_shard_ranges_bad_response_body(self, resp_body):
# verify that resp body is not cached if shard range parsing fails;
        # check that the original unparseable response body is returned
self._setup_shard_range_stubs()
self.memcache.clear_calls()
req = self._build_request(
{'X-Backend-Record-Type': 'shard'},
{'states': 'listing'}, {})
resp_hdrs = {'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Sharding-State': 'sharded'}
backend_req, resp = self._capture_backend_request(
req, 200, json.dumps(resp_body).encode('ascii'),
resp_hdrs)
self._check_backend_req(
req, backend_req,
extra_hdrs={'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': 'sharded'})
expected_hdrs = {'X-Backend-Recheck-Container-Existence': '60'}
expected_hdrs.update(resp_hdrs)
self._check_response(resp, resp_body, expected_hdrs)
# container metadata is looked up in memcache for sharding state
# container metadata is set in memcache
self.assertEqual(
[mock.call.get('container/a/c'),
mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
self.assertEqual(resp.headers.get('X-Backend-Sharding-State'),
self.memcache.calls[1][1][1]['sharding_state'])
self.assertEqual({'container.info.cache.miss': 1,
'container.shard_listing.cache.bypass.200': 1},
self.logger.statsd_client.get_increment_counts())
self.memcache.delete_all()
def test_GET_shard_ranges_bad_response_body(self):
self._do_test_GET_shard_ranges_bad_response_body(
{'bad': 'data', 'not': ' a list'})
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines), error_lines)
self.assertIn('Problem with listing response', error_lines[0])
self.logger.clear()
self._do_test_GET_shard_ranges_bad_response_body(
[{'not': ' a shard range'}])
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines), error_lines)
self.assertIn('Failed to get shard ranges', error_lines[0])
self.logger.clear()
self._do_test_GET_shard_ranges_bad_response_body(
'not json')
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines), error_lines)
self.assertIn('Problem with listing response', error_lines[0])
def _do_test_GET_shards_no_cache(self, sharding_state, req_params,
req_hdrs=None):
        # verify that a shard GET request does not look up shard ranges in
        # cache or attempt to cache shard ranges fetched from the backend
self.memcache.delete_all()
self.memcache.clear_calls()
req_params.update(dict(marker='egg', end_marker='jam'))
hdrs = {'X-Backend-Record-Type': 'shard'}
if req_hdrs:
hdrs.update(req_hdrs)
req = self._build_request(hdrs, req_params, {})
resp_shards = self.sr_dicts[:2]
backend_req, resp = self._capture_backend_request(
req, 200, json.dumps(resp_shards).encode('ascii'),
{'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state})
self._check_backend_req(
req, backend_req, extra_hdrs=hdrs, extra_params=req_params)
expected_shards = self.sr_dicts[:2]
self._check_response(resp, expected_shards, {
'X-Backend-Recheck-Container-Existence': '60',
'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state})
def _do_test_GET_shards_no_cache_listing(self, sharding_state):
# container metadata from backend response is set in memcache
self._do_test_GET_shards_no_cache(sharding_state,
{'states': 'listing'})
self.assertEqual(
[mock.call.get('container/a/c'),
mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
self.assertEqual(sharding_state,
self.memcache.calls[1][1][1]['sharding_state'])
def test_GET_shard_ranges_no_cache_recheck_listing_shard_ranges(self):
        # verify that a GET for shards does not look up or store shard ranges
        # in cache when the cache expiry time is set to zero
self._setup_shard_range_stubs()
self.app.recheck_listing_shard_ranges = 0
self._do_test_GET_shards_no_cache_listing('unsharded')
self._do_test_GET_shards_no_cache_listing('sharding')
self._do_test_GET_shards_no_cache_listing('sharded')
self._do_test_GET_shards_no_cache_listing('collapsed')
self._do_test_GET_shards_no_cache_listing('unexpected')
def _do_test_GET_shards_no_cache_updating(self, sharding_state):
# container metadata from backend response is set in memcache
self._do_test_GET_shards_no_cache(sharding_state,
{'states': 'updating'})
self.assertEqual(
[mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
self.assertEqual(sharding_state,
self.memcache.calls[0][1][1]['sharding_state'])
def test_GET_shard_ranges_no_cache_when_requesting_updating_shards(self):
        # verify that a GET for shards in updating states does not look up or
        # store shard ranges in cache
self._setup_shard_range_stubs()
self._do_test_GET_shards_no_cache_updating('unsharded')
self._do_test_GET_shards_no_cache_updating('sharding')
self._do_test_GET_shards_no_cache_updating('sharded')
self._do_test_GET_shards_no_cache_updating('collapsed')
self._do_test_GET_shards_no_cache_updating('unexpected')
def test_GET_shard_ranges_no_cache_when_include_deleted_shards(self):
        # verify that a GET for shards in listing states does not look up or
        # store shard ranges in cache if x-backend-include-deleted is true
self._setup_shard_range_stubs()
self._do_test_GET_shards_no_cache(
'unsharded', {'states': 'listing'},
{'X-Backend-Include-Deleted': 'true'})
self._do_test_GET_shards_no_cache(
'sharding', {'states': 'listing'},
{'X-Backend-Include-Deleted': 'true'})
self._do_test_GET_shards_no_cache(
'sharded', {'states': 'listing'},
{'X-Backend-Include-Deleted': 'true'})
self._do_test_GET_shards_no_cache(
'collapsed', {'states': 'listing'},
{'X-Backend-Include-Deleted': 'true'})
self._do_test_GET_shards_no_cache(
'unexpected', {'states': 'listing'},
{'X-Backend-Include-Deleted': 'true'})
def test_GET_objects_makes_no_cache_lookup(self):
        # verify that an object GET request does not look up container
        # metadata in cache
self._setup_shard_range_stubs()
self.memcache.delete_all()
self.memcache.clear_calls()
req_hdrs = {'X-Backend-Record-Type': 'object'}
# we would not expect states=listing to be used with an object request
# but include it here to verify that it is ignored
req = self._build_request(req_hdrs, {'states': 'listing'}, {})
resp_body = json.dumps(['object listing']).encode('ascii')
backend_req, resp = self._capture_backend_request(
req, 200, resp_body,
{'X-Backend-Record-Type': 'object',
'X-Backend-Sharding-State': 'sharded'})
self._check_backend_req(
req, backend_req,
extra_hdrs=req_hdrs)
self._check_response(resp, ['object listing'], {
'X-Backend-Recheck-Container-Existence': '60',
'X-Backend-Record-Type': 'object',
'X-Backend-Sharding-State': 'sharded'})
# container metadata from backend response is set in memcache
self.assertEqual(
[mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
self.assertEqual('sharded',
self.memcache.calls[0][1][1]['sharding_state'])
def test_GET_shard_ranges_no_memcache_available(self):
self._setup_shard_range_stubs()
self.memcache.clear_calls()
hdrs = {'X-Backend-Record-Type': 'shard'}
params = {'states': 'listing'}
req = self._build_request(hdrs, params, {})
req.environ['swift.cache'] = None
backend_req, resp = self._capture_backend_request(
req, 200, self._stub_shards_dump,
{'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': 'sharded'})
self._check_backend_req(
req, backend_req, extra_params=params, extra_hdrs=hdrs)
expected_shards = self.sr_dicts
self._check_response(resp, expected_shards, {
'X-Backend-Recheck-Container-Existence': '60',
'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': 'sharded'})
self.assertEqual([], self.memcache.calls) # sanity check
def test_cache_clearing(self):
        # verify that both metadata and shard ranges are purged from memcache
# on PUT, POST and DELETE
def do_test(method, resp_status, num_resp):
self.assertGreater(num_resp, 0) # sanity check
memcache = FakeMemcache()
cont_key = get_cache_key('a', 'c')
shard_key = get_cache_key('a', 'c', shard='listing')
memcache.set(cont_key, 'container info', 60)
memcache.set(shard_key, 'shard ranges', 600)
req = Request.blank('/v1/a/c', method=method)
req.environ['swift.cache'] = memcache
self.assertIn(cont_key, req.environ['swift.cache'].store)
self.assertIn(shard_key, req.environ['swift.cache'].store)
resp_status = [resp_status] * num_resp
with mocked_http_conn(
*resp_status, body_iter=[b''] * num_resp,
headers=[{}] * num_resp):
resp = req.get_response(self.app)
self.assertEqual(resp_status[0], resp.status_int)
self.assertNotIn(cont_key, req.environ['swift.cache'].store)
self.assertNotIn(shard_key, req.environ['swift.cache'].store)
do_test('DELETE', 204, self.CONTAINER_REPLICAS)
do_test('POST', 204, self.CONTAINER_REPLICAS)
do_test('PUT', 202, self.CONTAINER_REPLICAS)
def test_GET_bad_requests(self):
# verify that the proxy controller enforces checks on request params
req = Request.blank(
'/v1/a/c?limit=%d' % (CONTAINER_LISTING_LIMIT + 1))
self.assertEqual(412, req.get_response(self.app).status_int)
req = Request.blank('/v1/a/c?delimiter=%ff')
self.assertEqual(400, req.get_response(self.app).status_int)
req = Request.blank('/v1/a/c?marker=%ff')
self.assertEqual(400, req.get_response(self.app).status_int)
req = Request.blank('/v1/a/c?end_marker=%ff')
self.assertEqual(400, req.get_response(self.app).status_int)
req = Request.blank('/v1/a/c?prefix=%ff')
self.assertEqual(400, req.get_response(self.app).status_int)
req = Request.blank('/v1/a/c?format=%ff')
self.assertEqual(400, req.get_response(self.app).status_int)
req = Request.blank('/v1/a/c?path=%ff')
self.assertEqual(400, req.get_response(self.app).status_int)
req = Request.blank('/v1/a/c?includes=%ff')
self.assertEqual(400, req.get_response(self.app).status_int)
req = Request.blank('/v1/a/c?states=%ff')
self.assertEqual(400, req.get_response(self.app).status_int)
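# Repeat the container controller tests with a 4-replica container ring; the
# response-code tables below enumerate the expected best response for each
# combination of backend statuses.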
@patch_policies(
[StoragePolicy(0, 'zero', True, object_ring=FakeRing(replicas=4))])
class TestContainerController4Replicas(TestContainerController):
CONTAINER_REPLICAS = 4
def test_response_code_for_PUT(self):
PUT_TEST_CASES = [
((201, 201, 201, 201), 201),
((201, 201, 201, 404), 201),
((201, 201, 201, 503), 201),
((201, 201, 404, 404), 201),
((201, 201, 404, 503), 201),
((201, 201, 503, 503), 201),
((201, 404, 404, 404), 404),
((201, 404, 404, 503), 404),
((201, 404, 503, 503), 503),
((201, 503, 503, 503), 503),
((404, 404, 404, 404), 404),
((404, 404, 404, 503), 404),
((404, 404, 503, 503), 404),
((404, 503, 503, 503), 503),
((503, 503, 503, 503), 503)
]
self._assert_responses('PUT', PUT_TEST_CASES)
def test_response_code_for_DELETE(self):
DELETE_TEST_CASES = [
((204, 204, 204, 204), 204),
((204, 204, 204, 404), 204),
((204, 204, 204, 503), 204),
((204, 204, 404, 404), 204),
((204, 204, 404, 503), 204),
((204, 204, 503, 503), 204),
((204, 404, 404, 404), 404),
((204, 404, 404, 503), 404),
((204, 404, 503, 503), 503),
((204, 503, 503, 503), 503),
((404, 404, 404, 404), 404),
((404, 404, 404, 503), 404),
((404, 404, 503, 503), 404),
((404, 503, 503, 503), 503),
((503, 503, 503, 503), 503)
]
self._assert_responses('DELETE', DELETE_TEST_CASES)
def test_response_code_for_POST(self):
POST_TEST_CASES = [
((204, 204, 204, 204), 204),
((204, 204, 204, 404), 204),
((204, 204, 204, 503), 204),
((204, 204, 404, 404), 204),
((204, 204, 404, 503), 204),
((204, 204, 503, 503), 204),
((204, 404, 404, 404), 404),
((204, 404, 404, 503), 404),
((204, 404, 503, 503), 503),
((204, 503, 503, 503), 503),
((404, 404, 404, 404), 404),
((404, 404, 404, 503), 404),
((404, 404, 503, 503), 404),
((404, 503, 503, 503), 503),
((503, 503, 503, 503), 503)
]
self._assert_responses('POST', POST_TEST_CASES)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/proxy/controllers/test_container.py |
swift-master | test/unit/proxy/controllers/__init__.py |
|
#!/usr/bin/env python
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import itertools
import math
import random
import time
import unittest
from collections import defaultdict
from contextlib import contextmanager
import json
import mock
from eventlet import Timeout, sleep
from eventlet.queue import Empty
import six
from six import StringIO
from six.moves import range
from six.moves.urllib.parse import quote
if six.PY2:
from email.parser import FeedParser as EmailFeedParser
else:
from email.parser import BytesFeedParser as EmailFeedParser
import swift
from swift.common import utils, swob, exceptions
from swift.common.exceptions import ChunkWriteTimeout, ShortReadError, \
ChunkReadTimeout
from swift.common.utils import Timestamp, list_from_csv, md5, FileLikeIter
from swift.proxy import server as proxy_server
from swift.proxy.controllers import obj
from swift.proxy.controllers.base import \
get_container_info as _real_get_container_info, GetterSource, \
NodeIter
from swift.common.storage_policy import POLICIES, ECDriverError, \
StoragePolicy, ECStoragePolicy
from swift.common.swob import Request
from test.debug_logger import debug_logger
from test.unit import (
FakeRing, fake_http_connect, patch_policies, SlowBody, FakeStatus,
DEFAULT_TEST_EC_TYPE, encode_frag_archive_bodies, make_ec_object_stub,
fake_ec_node_response, StubResponse, mocked_http_conn,
quiet_eventlet_exceptions, FakeSource)
from test.unit.proxy.test_server import node_error_count
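# Helper to reassemble the payload of an HTTP chunked transfer-encoded body;
# each frame is "<hex length>\r\n<data>\r\n" and frames are consumed until
# the input is exhausted.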
def unchunk_body(chunked_body):
body = b''
remaining = chunked_body
while remaining:
hex_length, remaining = remaining.split(b'\r\n', 1)
length = int(hex_length, 16)
body += remaining[:length]
remaining = remaining[length + 2:]
return body
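# Context manager that patches fake_http_connect() in place of http_connect
# in all of the proxy controller modules; on exit it fails the test if any
# stubbed status codes were left unconsumed and restores the real
# http_connect.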
@contextmanager
def set_http_connect(*args, **kwargs):
old_connect = swift.proxy.controllers.base.http_connect
new_connect = fake_http_connect(*args, **kwargs)
try:
swift.proxy.controllers.base.http_connect = new_connect
swift.proxy.controllers.obj.http_connect = new_connect
swift.proxy.controllers.account.http_connect = new_connect
swift.proxy.controllers.container.http_connect = new_connect
yield new_connect
left_over_status = list(new_connect.code_iter)
if left_over_status:
raise AssertionError('left over status %r' % left_over_status)
finally:
swift.proxy.controllers.base.http_connect = old_connect
swift.proxy.controllers.obj.http_connect = old_connect
swift.proxy.controllers.account.http_connect = old_connect
swift.proxy.controllers.container.http_connect = old_connect
class PatchedObjControllerApp(proxy_server.Application):
"""
This patch is just a hook over the proxy server's __call__ to ensure
that calls to get_container_info will return the stubbed value for
container_info if it's a container info call.
"""
container_info = {}
per_container_info = {}
def __call__(self, *args, **kwargs):
def _fake_get_container_info(env, app, swift_source=None):
_vrs, account, container, _junk = utils.split_path(
swob.wsgi_to_str(env['PATH_INFO']), 3, 4)
# Seed the cache with our container info so that the real
# get_container_info finds it.
ic = env.setdefault('swift.infocache', {})
cache_key = "container/%s/%s" % (account, container)
old_value = ic.get(cache_key)
# Copy the container info so we don't hand out a reference to a
# mutable thing that's set up only once at compile time. Nothing
# *should* mutate it, but it's better to be paranoid than wrong.
if container in self.per_container_info:
ic[cache_key] = self.per_container_info[container].copy()
else:
ic[cache_key] = self.container_info.copy()
real_info = _real_get_container_info(env, app, swift_source)
if old_value is None:
del ic[cache_key]
else:
ic[cache_key] = old_value
return real_info
with mock.patch('swift.proxy.server.get_container_info',
new=_fake_get_container_info), \
mock.patch('swift.proxy.controllers.base.get_container_info',
new=_fake_get_container_info):
return super(
PatchedObjControllerApp, self).__call__(*args, **kwargs)
def make_footers_callback(body=None):
# helper method to create a footers callback that will generate some fake
# footer metadata
cont_etag = 'container update etag may differ'
crypto_etag = '20242af0cd21dd7195a10483eb7472c9'
etag_crypto_meta = \
'{"cipher": "AES_CTR_256", "iv": "sD+PSw/DfqYwpsVGSo0GEw=="}'
etag = md5(body,
usedforsecurity=False).hexdigest() if body is not None else None
footers_to_add = {
'X-Object-Sysmeta-Container-Update-Override-Etag': cont_etag,
'X-Object-Sysmeta-Crypto-Etag': crypto_etag,
'X-Object-Sysmeta-Crypto-Meta-Etag': etag_crypto_meta,
'X-I-Feel-Lucky': 'Not blocked',
'Etag': etag}
def footers_callback(footers):
footers.update(footers_to_add)
return footers_callback
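# Shared fixtures for the object controller tests: a stubbed container_info
# (consumed by PatchedObjControllerApp), a proxy app wired to fake rings, and
# helpers for timestamps, replica counts and quorum sizes.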
class BaseObjectControllerMixin(object):
container_info = {
'status': 200,
'write_acl': None,
'read_acl': None,
'storage_policy': None,
'sync_key': None,
'versions': None,
}
# this needs to be set on the test case
controller_cls = None
def setUp(self):
# setup fake rings with handoffs
for policy in POLICIES:
policy.object_ring.max_more_nodes = policy.object_ring.replicas
self.logger = debug_logger('proxy-server')
self.logger.thread_locals = ('txn1', '127.0.0.2')
# increase connection timeout to avoid intermittent failures
conf = {'conn_timeout': 1.0}
self.app = PatchedObjControllerApp(
conf, account_ring=FakeRing(),
container_ring=FakeRing(), logger=self.logger)
self.logger.clear() # startup/loading debug msgs not helpful
# you can over-ride the container_info just by setting it on the app
# (see PatchedObjControllerApp for details)
self.app.container_info = dict(self.container_info)
# default policy and ring references
self.policy = POLICIES.default
self.obj_ring = self.policy.object_ring
self._ts_iter = (utils.Timestamp(t) for t in
itertools.count(int(time.time())))
def ts(self):
return next(self._ts_iter)
def replicas(self, policy=None):
policy = policy or POLICIES.default
return policy.object_ring.replicas
def quorum(self, policy=None):
policy = policy or POLICIES.default
return policy.quorum
class CommonObjectControllerMixin(BaseObjectControllerMixin):
# defines tests that are common to all storage policy types
def test_iter_nodes_local_first_noops_when_no_affinity(self):
# this test needs a stable node order - most don't
self.app.sort_nodes = lambda l, *args, **kwargs: l
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy = self.policy
self.app.get_policy_options(policy).write_affinity_is_local_fn = None
object_ring = policy.object_ring
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
for node in all_nodes:
node['use_replication'] = False
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, Request.blank('')))
self.maxDiff = None
self.assertEqual(all_nodes, local_first_nodes)
def test_iter_nodes_local_first_moves_locals_first(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
# we'll write to one more than replica count local nodes
policy_conf.write_affinity_node_count_fn = lambda r: r + 1
object_ring = self.policy.object_ring
# make our fake ring have plenty of nodes, and not get limited
# artificially by the proxy max request node count
object_ring.max_more_nodes = 100000
# nothing magic about * 2 + 3, just a way to make it bigger
self.app.request_node_count = lambda r: r * 2 + 3
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
for node in all_nodes:
node['use_replication'] = False
# limit to the number we're going to look at in this request
nodes_requested = self.app.request_node_count(object_ring.replicas)
all_nodes = all_nodes[:nodes_requested]
# make sure we have enough local nodes (sanity)
all_local_nodes = [n for n in all_nodes if
policy_conf.write_affinity_is_local_fn(n)]
self.assertGreaterEqual(len(all_local_nodes), self.replicas() + 1)
# finally, create the local_first_nodes iter and flatten it out
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, Request.blank('')))
# the local nodes move up in the ordering
self.assertEqual([1] * (self.replicas() + 1), [
node['region'] for node in local_first_nodes[
:self.replicas() + 1]])
# we don't skip any nodes
self.assertEqual(len(all_nodes), len(local_first_nodes))
self.assertEqual(sorted(all_nodes, key=lambda dev: dev['id']),
sorted(local_first_nodes, key=lambda dev: dev['id']))
for node in all_nodes:
node['use_replication'] = True
req = Request.blank(
'/v1/a/c', headers={'x-backend-use-replication-network': 'yes'})
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, request=req))
self.assertEqual([1] * (self.replicas() + 1), [
node['region'] for node in local_first_nodes[
:self.replicas() + 1]])
# we don't skip any nodes
self.assertEqual(len(all_nodes), len(local_first_nodes))
self.assertEqual(sorted(all_nodes, key=lambda dev: dev['id']),
sorted(local_first_nodes, key=lambda dev: dev['id']))
def test_iter_nodes_local_first_best_effort(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
object_ring = self.policy.object_ring
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
for node in all_nodes:
node['use_replication'] = False
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, request=Request.blank('')))
# we won't have quite enough local nodes...
self.assertEqual(len(all_nodes), self.replicas() +
POLICIES.default.object_ring.max_more_nodes)
all_local_nodes = [n for n in all_nodes if
policy_conf.write_affinity_is_local_fn(n)]
self.assertEqual(len(all_local_nodes), self.replicas())
# but the local nodes we do have are at the front of the local iter
first_n_local_first_nodes = local_first_nodes[:len(all_local_nodes)]
self.assertEqual(sorted(all_local_nodes, key=lambda dev: dev['id']),
sorted(first_n_local_first_nodes,
key=lambda dev: dev['id']))
# but we *still* don't *skip* any nodes
self.assertEqual(len(all_nodes), len(local_first_nodes))
self.assertEqual(sorted(all_nodes, key=lambda dev: dev['id']),
sorted(local_first_nodes, key=lambda dev: dev['id']))
def test_iter_nodes_local_handoff_first_noops_when_no_affinity(self):
# this test needs a stable node order - most don't
self.app.sort_nodes = lambda l, *args, **kwargs: l
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy = self.policy
self.app.get_policy_options(policy).write_affinity_is_local_fn = None
object_ring = policy.object_ring
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
for node in all_nodes:
node['use_replication'] = False
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, local_handoffs_first=True,
request=Request.blank('')))
self.maxDiff = None
self.assertEqual(all_nodes, local_first_nodes)
def test_iter_nodes_handoff_local_first_default(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
object_ring = self.policy.object_ring
primary_nodes = object_ring.get_part_nodes(1)
handoff_nodes_iter = object_ring.get_more_nodes(1)
all_nodes = primary_nodes + list(handoff_nodes_iter)
for node in all_nodes:
node['use_replication'] = False
handoff_nodes_iter = object_ring.get_more_nodes(1)
local_handoffs = [n for n in handoff_nodes_iter if
policy_conf.write_affinity_is_local_fn(n)]
prefered_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, local_handoffs_first=True,
request=Request.blank('')))
self.assertEqual(len(all_nodes), self.replicas() +
POLICIES.default.object_ring.max_more_nodes)
first_primary_nodes = prefered_nodes[:len(primary_nodes)]
self.assertEqual(sorted(primary_nodes, key=lambda dev: dev['id']),
sorted(first_primary_nodes,
key=lambda dev: dev['id']))
handoff_count = self.replicas() - len(primary_nodes)
first_handoffs = prefered_nodes[len(primary_nodes):][:handoff_count]
self.assertEqual(first_handoffs, local_handoffs[:handoff_count])
def test_iter_nodes_handoff_local_first_non_default(self):
# Obviously this test doesn't work if we're testing 1 replica.
# In that case, we don't have any failovers to check.
if self.replicas() == 1:
return
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
policy_conf.write_affinity_handoff_delete_count = 1
object_ring = self.policy.object_ring
primary_nodes = object_ring.get_part_nodes(1)
handoff_nodes_iter = object_ring.get_more_nodes(1)
all_nodes = primary_nodes + list(handoff_nodes_iter)
for node in all_nodes:
node['use_replication'] = False
handoff_nodes_iter = object_ring.get_more_nodes(1)
local_handoffs = [n for n in handoff_nodes_iter if
policy_conf.write_affinity_is_local_fn(n)]
for node in local_handoffs:
node['use_replication'] = False
prefered_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, local_handoffs_first=True,
request=Request.blank('')))
self.assertEqual(len(all_nodes), self.replicas() +
POLICIES.default.object_ring.max_more_nodes)
first_primary_nodes = prefered_nodes[:len(primary_nodes)]
self.assertEqual(sorted(primary_nodes, key=lambda dev: dev['id']),
sorted(first_primary_nodes,
key=lambda dev: dev['id']))
handoff_count = policy_conf.write_affinity_handoff_delete_count
first_handoffs = prefered_nodes[len(primary_nodes):][:handoff_count]
self.assertEqual(first_handoffs, local_handoffs[:handoff_count])
def test_connect_put_node_timeout(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
req = swift.common.swob.Request.blank('/v1/a/c/o')
self.app.conn_timeout = 0.05
with set_http_connect(slow_connect=True):
nodes = [dict(ip='', port='', device='')]
res = controller._connect_put_node(nodes, '', req, {}, ('', ''))
self.assertIsNone(res)
def test_DELETE_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas()
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_missing_one(self):
# Obviously this test doesn't work if we're testing 1 replica.
# In that case, we don't have any failovers to check.
if self.replicas() == 1:
return
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [404] + [204] * (self.replicas() - 1)
random.shuffle(codes)
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_not_found(self):
# Obviously this test doesn't work if we're testing 1 replica.
# In that case, we don't have any failovers to check.
if self.replicas() == 1:
return
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [404] * (self.replicas() - 1) + [204]
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_DELETE_mostly_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
mostly_204s = [204] * self.quorum()
codes = mostly_204s + [404] * (self.replicas() - len(mostly_204s))
self.assertEqual(len(codes), self.replicas())
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_mostly_not_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
mostly_404s = [404] * self.quorum()
codes = mostly_404s + [204] * (self.replicas() - len(mostly_404s))
self.assertEqual(len(codes), self.replicas())
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_DELETE_half_not_found_statuses(self):
self.obj_ring.set_replicas(4)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
with set_http_connect(404, 204, 404, 204):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_half_not_found_headers_and_body(self):
# Transformed responses have bogus bodies and headers, so make sure we
# send the client headers and body from a real node's response.
self.obj_ring.set_replicas(4)
status_codes = (404, 404, 204, 204)
bodies = (b'not found', b'not found', b'', b'')
headers = [{}, {}, {'Pick-Me': 'yes'}, {'Pick-Me': 'yes'}]
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
with set_http_connect(*status_codes, body_iter=bodies,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('Pick-Me'), 'yes')
self.assertEqual(resp.body, b'')
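    # a 507 from the first primary causes fail-over to a handoff node, which
    # succeeds, so the client still gets a 204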
def test_DELETE_handoff(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas()
with set_http_connect(507, *codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
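    # verify that at most two of the backend DELETE requests are asked to
    # update the expirer queue; the rest get
    # X-Backend-Clean-Expiring-Object-Queue: false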
def test_DELETE_limits_expirer_queue_updates(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas()
captured_headers = []
def capture_headers(ip, port, device, part, method, path,
headers=None, **kwargs):
captured_headers.append(headers)
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204) # sanity check
counts = {True: 0, False: 0, None: 0}
for headers in captured_headers:
v = headers.get('X-Backend-Clean-Expiring-Object-Queue')
norm_v = None if v is None else utils.config_true_value(v)
counts[norm_v] += 1
max_queue_updates = 2
o_replicas = self.replicas()
self.assertEqual(counts, {
True: min(max_queue_updates, o_replicas),
False: max(o_replicas - max_queue_updates, 0),
None: 0,
})
def test_expirer_DELETE_suppresses_expirer_queue_updates(self):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='DELETE', headers={
'X-Backend-Clean-Expiring-Object-Queue': 'no'})
codes = [204] * self.replicas()
captured_headers = []
def capture_headers(ip, port, device, part, method, path,
headers=None, **kwargs):
captured_headers.append(headers)
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204) # sanity check
counts = {True: 0, False: 0, None: 0}
for headers in captured_headers:
v = headers.get('X-Backend-Clean-Expiring-Object-Queue')
norm_v = None if v is None else utils.config_true_value(v)
counts[norm_v] += 1
o_replicas = self.replicas()
self.assertEqual(counts, {
True: 0,
False: o_replicas,
None: 0,
})
# Make sure we're not sending any expirer-queue update headers here.
# Since we're not updating the expirer queue, these headers would be
# superfluous.
for headers in captured_headers:
self.assertNotIn('X-Delete-At-Container', headers)
self.assertNotIn('X-Delete-At-Partition', headers)
self.assertNotIn('X-Delete-At-Host', headers)
self.assertNotIn('X-Delete-At-Device', headers)
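    # the next two tests enable write affinity so that DELETEs are also sent
    # to local handoff nodes; the stubbed status lists include responses for
    # the write_affinity_handoff_delete_count extra requests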
def test_DELETE_write_affinity_after_replication(self):
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_handoff_delete_count = self.replicas() // 2
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
handoff_count = policy_conf.write_affinity_handoff_delete_count
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas() + [404] * handoff_count
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_write_affinity_before_replication(self):
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_handoff_delete_count = self.replicas() // 2
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
handoff_count = policy_conf.write_affinity_handoff_delete_count
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = ([204] * (self.replicas() - handoff_count) +
[404] * handoff_count +
[204] * handoff_count)
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_PUT_limits_expirer_queue_deletes(self):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', body=b'',
headers={'Content-Type': 'application/octet-stream'})
codes = [201] * self.replicas()
captured_headers = []
def capture_headers(ip, port, device, part, method, path,
headers=None, **kwargs):
captured_headers.append(headers)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, give_connect=capture_headers,
expect_headers=expect_headers):
# this req may or may not succeed depending on the Putter type used
# but that's ok because we're only interested in verifying the
# headers that were sent
req.get_response(self.app)
counts = {True: 0, False: 0, None: 0}
for headers in captured_headers:
v = headers.get('X-Backend-Clean-Expiring-Object-Queue')
norm_v = None if v is None else utils.config_true_value(v)
counts[norm_v] += 1
max_queue_updates = 2
o_replicas = self.replicas()
self.assertEqual(counts, {
True: min(max_queue_updates, o_replicas),
False: max(o_replicas - max_queue_updates, 0),
None: 0,
})
def test_POST_limits_expirer_queue_deletes(self):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='POST', body=b'',
headers={'Content-Type': 'application/octet-stream'})
codes = [201] * self.replicas()
captured_headers = []
def capture_headers(ip, port, device, part, method, path,
headers=None, **kwargs):
captured_headers.append(headers)
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201) # sanity check
counts = {True: 0, False: 0, None: 0}
for headers in captured_headers:
v = headers.get('X-Backend-Clean-Expiring-Object-Queue')
norm_v = None if v is None else utils.config_true_value(v)
counts[norm_v] += 1
max_queue_updates = 2
o_replicas = self.replicas()
self.assertEqual(counts, {
True: min(max_queue_updates, o_replicas),
False: max(o_replicas - max_queue_updates, 0),
None: 0,
})
def test_POST_non_int_delete_after(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual(b'Non-integer X-Delete-After', resp.body)
def test_PUT_non_int_delete_after(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='PUT', body=b'',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual(b'Non-integer X-Delete-After', resp.body)
def test_POST_negative_delete_after(self):
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '-60'})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual(b'X-Delete-After in past', resp.body)
def test_PUT_negative_delete_after(self):
req = swob.Request.blank('/v1/a/c/o', method='PUT', body=b'',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '-60'})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual(b'X-Delete-After in past', resp.body)
def test_POST_delete_at_non_integer(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual(b'Non-integer X-Delete-At', resp.body)
def test_PUT_delete_at_non_integer(self):
t = str(int(time.time() - 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='PUT', body=b'',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual(b'Non-integer X-Delete-At', resp.body)
def test_POST_delete_at_in_past(self):
t = str(int(time.time() - 100))
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual(b'X-Delete-At in past', resp.body)
def test_PUT_delete_at_in_past(self):
t = str(int(time.time() - 100))
req = swob.Request.blank('/v1/a/c/o', method='PUT', body=b'',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual(b'X-Delete-At in past', resp.body)
def test_HEAD_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
with set_http_connect(200):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertIn('Accept-Ranges', resp.headers)
def test_HEAD_x_newest(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
with set_http_connect(*([200] * self.replicas())):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_HEAD_x_newest_different_timestamps(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
timestamps = [next(ts) for i in range(self.replicas())]
newest_timestamp = timestamps[-1]
random.shuffle(timestamps)
backend_response_headers = [{
'X-Backend-Timestamp': t.internal,
'X-Timestamp': t.normal
} for t in timestamps]
with set_http_connect(*([200] * self.replicas()),
headers=backend_response_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['x-timestamp'], newest_timestamp.normal)
def test_HEAD_x_newest_with_two_vector_timestamps(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp.now(offset=offset)
for offset in itertools.count())
timestamps = [next(ts) for i in range(self.replicas())]
newest_timestamp = timestamps[-1]
random.shuffle(timestamps)
backend_response_headers = [{
'X-Backend-Timestamp': t.internal,
'X-Timestamp': t.normal
} for t in timestamps]
with set_http_connect(*([200] * self.replicas()),
headers=backend_response_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['x-backend-timestamp'],
newest_timestamp.internal)
def test_HEAD_x_newest_with_some_missing(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
request_count = self.app.request_node_count(self.obj_ring.replicas)
backend_response_headers = [{
'x-timestamp': next(ts).normal,
} for i in range(request_count)]
responses = [404] * (request_count - 1)
responses.append(200)
request_log = []
def capture_requests(ip, port, device, part, method, path,
headers=None, **kwargs):
req = {
'ip': ip,
'port': port,
'device': device,
'part': part,
'method': method,
'path': path,
'headers': headers,
}
request_log.append(req)
with set_http_connect(*responses,
headers=backend_response_headers,
give_connect=capture_requests):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
for req in request_log:
self.assertEqual(req['method'], 'HEAD')
self.assertEqual(req['path'], '/a/c/o')
def test_some_404s_and_507s(self):
self.policy.object_ring.max_more_nodes = (3 * self.replicas())
req = swob.Request.blank('/v1/a/c/o', method='HEAD')
responses = [StubResponse(
404, headers={'X-Backend-Timestamp': '2'})] * self.replicas()
responses += [StubResponse(507, headers={})] * (
self.policy.object_ring.max_more_nodes - self.replicas())
self.assertEqual(len(responses), 3 * self.replicas()) # sanity
def get_response(req):
return responses.pop(0)
with capture_http_requests(get_response):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Backend-Timestamp'], '2')
def test_container_sync_delete(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
req = swob.Request.blank(
'/v1/a/c/o', method='DELETE', headers={
'X-Timestamp': next(ts).internal})
codes = [409] * self.obj_ring.replicas
ts_iter = itertools.repeat(next(ts).internal)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 409)
def test_PUT_requires_length(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 411)
def test_container_update_backend_requests(self):
for policy in POLICIES:
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT',
headers={'Content-Length': '0',
'X-Backend-Storage-Policy-Index': int(policy)})
controller = self.controller_cls(self.app, 'a', 'c', 'o')
# This is the number of container updates we're doing, simulating
# 1 to 15 container replicas.
for num_containers in range(1, 16):
containers = [{'ip': '1.0.0.%s' % i,
'port': '60%s' % str(i).zfill(2),
'device': 'sdb'} for i in range(num_containers)]
backend_headers = controller._backend_requests(
req, self.replicas(policy), 1, containers)
# how many of the backend headers have a container update
n_container_updates = len(
[headers for headers in backend_headers
if 'X-Container-Partition' in headers])
# how many object-server PUTs can fail and still let the
# client PUT succeed
n_can_fail = self.replicas(policy) - self.quorum(policy)
n_expected_updates = (
n_can_fail + utils.quorum_size(num_containers))
# you get at least one update per container no matter what
n_expected_updates = max(
n_expected_updates, num_containers)
# you can't have more object requests with updates than you
# have object requests (the container stuff gets doubled up,
# but that's not important for purposes of durability)
n_expected_updates = min(
n_expected_updates, self.replicas(policy))
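                # e.g. a hypothetical 3-replica policy (quorum 2) with 5
                # container replicas: n_can_fail = 1, quorum_size(5) = 3,
                # giving 4 updates, raised to max(4, 5) = 5 and finally
                # capped at min(5, 3) = 3 container updates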
self.assertEqual(n_expected_updates, n_container_updates)
def test_delete_at_backend_requests(self):
t = str(int(time.time() + 100))
for policy in POLICIES:
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT',
headers={'Content-Length': '0',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Delete-At': t})
controller = self.controller_cls(self.app, 'a', 'c', 'o')
for num_del_at_nodes in range(1, 16):
containers = [
{'ip': '2.0.0.%s' % i, 'port': '70%s' % str(i).zfill(2),
'device': 'sdc'} for i in range(num_del_at_nodes)]
del_at_nodes = [
{'ip': '1.0.0.%s' % i, 'port': '60%s' % str(i).zfill(2),
'device': 'sdb'} for i in range(num_del_at_nodes)]
backend_headers = controller._backend_requests(
req, self.replicas(policy), 1, containers,
delete_at_container='dac', delete_at_partition=2,
delete_at_nodes=del_at_nodes)
devices = []
hosts = []
part = ctr = 0
for given_headers in backend_headers:
self.assertEqual(given_headers.get('X-Delete-At'), t)
if 'X-Delete-At-Partition' in given_headers:
self.assertEqual(
given_headers.get('X-Delete-At-Partition'), '2')
part += 1
if 'X-Delete-At-Container' in given_headers:
self.assertEqual(
given_headers.get('X-Delete-At-Container'), 'dac')
ctr += 1
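                    # host and device headers are CSV lists; one object
                    # request may carry updates for several delete-at nodes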
devices += (
list_from_csv(given_headers.get('X-Delete-At-Device')))
hosts += (
list_from_csv(given_headers.get('X-Delete-At-Host')))
# same as in test_container_update_backend_requests
n_can_fail = self.replicas(policy) - self.quorum(policy)
n_expected_updates = (
n_can_fail + utils.quorum_size(num_del_at_nodes))
n_expected_hosts = max(
n_expected_updates, num_del_at_nodes)
self.assertEqual(len(hosts), n_expected_hosts)
self.assertEqual(len(devices), n_expected_hosts)
# parts don't get doubled up, maximum is count of obj requests
n_expected_parts = min(
n_expected_hosts, self.replicas(policy))
self.assertEqual(part, n_expected_parts)
self.assertEqual(ctr, n_expected_parts)
# check that hosts are correct
self.assertEqual(
set(hosts),
set('%s:%s' % (h['ip'], h['port']) for h in del_at_nodes))
self.assertEqual(set(devices), set(('sdb',)))
def test_smooth_distributed_backend_requests(self):
t = str(int(time.time() + 100))
for policy in POLICIES:
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT',
headers={'Content-Length': '0',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Delete-At': t})
controller = self.controller_cls(self.app, 'a', 'c', 'o')
for num_containers in range(1, 16):
containers = [
{'ip': '2.0.0.%s' % i, 'port': '70%s' % str(i).zfill(2),
'device': 'sdc'} for i in range(num_containers)]
del_at_nodes = [
{'ip': '1.0.0.%s' % i, 'port': '60%s' % str(i).zfill(2),
'device': 'sdb'} for i in range(num_containers)]
backend_headers = controller._backend_requests(
req, self.replicas(policy), 1, containers,
delete_at_container='dac', delete_at_partition=2,
delete_at_nodes=del_at_nodes)
                # calculate the number of expected updates; see
                # test_container_update_backend_requests for an explanation
n_expected_updates = min(max(
self.replicas(policy) - self.quorum(policy) +
utils.quorum_size(num_containers), num_containers),
self.replicas(policy))
# the first n_expected_updates servers should have received
# a container update
self.assertTrue(
all([h.get('X-Container-Partition')
for h in backend_headers[:n_expected_updates]]))
# the last n_expected_updates servers should have received
# the x-delete-at* headers
self.assertTrue(
all([h.get('X-Delete-At-Container')
for h in backend_headers[-n_expected_updates:]]))
def _check_write_affinity(
self, conf, policy_conf, policy, affinity_regions, affinity_count):
conf['policy_config'] = policy_conf
app = PatchedObjControllerApp(
conf, account_ring=FakeRing(),
container_ring=FakeRing(), logger=self.logger)
controller = self.controller_cls(app, 'a', 'c', 'o')
object_ring = app.get_object_ring(int(policy))
# make our fake ring have plenty of nodes, and not get limited
# artificially by the proxy max request node count
object_ring.max_more_nodes = 100
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
# make sure we have enough local nodes (sanity)
all_local_nodes = [n for n in all_nodes if
n['region'] in affinity_regions]
self.assertGreaterEqual(len(all_local_nodes), affinity_count)
# finally, create the local_first_nodes iter and flatten it out
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, Request.blank(''), policy))
        # check that the required number of local nodes were moved up the
        # order
node_regions = [node['region'] for node in local_first_nodes]
self.assertTrue(
all(r in affinity_regions for r in node_regions[:affinity_count]),
'Unexpected region found in local nodes, expected %s but got %s' %
(affinity_regions, node_regions))
return app
def test_write_affinity_not_configured(self):
# default is no write affinity so expect both regions 0 and 1
self._check_write_affinity({}, {}, POLICIES[0], [0, 1],
2 * self.replicas(POLICIES[0]))
self._check_write_affinity({}, {}, POLICIES[1], [0, 1],
2 * self.replicas(POLICIES[1]))
def test_write_affinity_proxy_server_config(self):
# without overrides policies use proxy-server config section options
conf = {'write_affinity_node_count': '1 * replicas',
'write_affinity': 'r0'}
self._check_write_affinity(conf, {}, POLICIES[0], [0],
self.replicas(POLICIES[0]))
self._check_write_affinity(conf, {}, POLICIES[1], [0],
self.replicas(POLICIES[1]))
def test_write_affinity_per_policy_config(self):
# check only per-policy configuration is sufficient
conf = {}
policy_conf = {'0': {'write_affinity_node_count': '1 * replicas',
'write_affinity': 'r1'},
'1': {'write_affinity_node_count': '5',
'write_affinity': 'r0'}}
self._check_write_affinity(conf, policy_conf, POLICIES[0], [1],
self.replicas(POLICIES[0]))
self._check_write_affinity(conf, policy_conf, POLICIES[1], [0], 5)
def test_write_affinity_per_policy_config_overrides_and_inherits(self):
# check per-policy config is preferred over proxy-server section config
conf = {'write_affinity_node_count': '1 * replicas',
'write_affinity': 'r0'}
policy_conf = {'0': {'write_affinity': 'r1'},
'1': {'write_affinity_node_count': '3 * replicas'}}
# policy 0 inherits default node count, override affinity to r1
self._check_write_affinity(conf, policy_conf, POLICIES[0], [1],
self.replicas(POLICIES[0]))
# policy 1 inherits default affinity to r0, overrides node count
self._check_write_affinity(conf, policy_conf, POLICIES[1], [0],
3 * self.replicas(POLICIES[1]))
# end of CommonObjectControllerMixin
@patch_policies()
class TestReplicatedObjController(CommonObjectControllerMixin,
unittest.TestCase):
controller_cls = obj.ReplicatedObjectController
def test_PUT_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['content-length'] = '0'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_error_with_footers(self):
footers_callback = make_footers_callback(b'')
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
environ=env)
req.headers['content-length'] = '0'
codes = [503] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def _test_PUT_with_no_footers(self, test_body=b'', chunked=False):
# verify that when no footers are required then the PUT uses a regular
# single part body
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=test_body)
if chunked:
req.headers['Transfer-Encoding'] = 'chunked'
etag = md5(test_body, usedforsecurity=False).hexdigest()
req.headers['Etag'] = etag
put_requests = defaultdict(
lambda: {'headers': None, 'chunks': [], 'connection': None})
def capture_body(conn, chunk):
put_requests[conn.connection_id]['chunks'].append(chunk)
put_requests[conn.connection_id]['connection'] = conn
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
conn_id = kwargs['connection_id']
put_requests[conn_id]['headers'] = headers
codes = [201] * self.replicas()
expect_headers = {'X-Obj-Metadata-Footer': 'yes'}
resp_headers = {
'Some-Header': 'Four',
'Etag': '"%s"' % etag,
}
with set_http_connect(*codes, expect_headers=expect_headers,
give_send=capture_body,
give_connect=capture_headers,
headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
timestamps = {captured_req['headers']['x-timestamp']
for captured_req in put_requests.values()}
self.assertEqual(1, len(timestamps), timestamps)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': '0',
'Etag': etag,
'Last-Modified': time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(math.ceil(float(timestamps.pop())))),
})
for connection_id, info in put_requests.items():
body = b''.join(info['chunks'])
headers = info['headers']
if chunked or not test_body:
body = unchunk_body(body)
self.assertEqual('100-continue', headers['Expect'])
self.assertEqual('chunked', headers['Transfer-Encoding'])
self.assertNotIn('Content-Length', headers)
else:
self.assertNotIn('Transfer-Encoding', headers)
if body or not test_body:
self.assertEqual('100-continue', headers['Expect'])
else:
self.assertNotIn('Expect', headers)
self.assertNotIn('X-Backend-Obj-Multipart-Mime-Boundary', headers)
self.assertNotIn('X-Backend-Obj-Metadata-Footer', headers)
self.assertNotIn('X-Backend-Obj-Multiphase-Commit', headers)
self.assertEqual(etag, headers['Etag'])
self.assertEqual(test_body, body)
self.assertTrue(info['connection'].closed)
def test_PUT_with_chunked_body_and_no_footers(self):
self._test_PUT_with_no_footers(test_body=b'asdf', chunked=True)
def test_PUT_with_body_and_no_footers(self):
self._test_PUT_with_no_footers(test_body=b'asdf', chunked=False)
def test_PUT_with_no_body_and_no_footers(self):
self._test_PUT_with_no_footers(test_body=b'', chunked=False)
def test_txn_id_logging_on_PUT(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
self.app.logger.txn_id = req.environ['swift.trans_id'] = 'test-txn-id'
req.headers['content-length'] = '0'
# we capture stdout since the debug log formatter prints the formatted
# message to stdout
stdout = StringIO()
with set_http_connect((100, Timeout()), 503, 503), \
mock.patch('sys.stdout', stdout):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
for line in stdout.getvalue().splitlines():
self.assertIn('test-txn-id', line)
self.assertIn('Trying to get final status of PUT to',
stdout.getvalue())
def test_PUT_empty_bad_etag(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['Content-Length'] = '0'
req.headers['Etag'] = '"catbus"'
# The 2-tuple here makes getexpect() return 422, not 100. For objects
# that are >0 bytes, you get a 100 Continue and then a 422
# Unprocessable Entity after sending the body. For zero-byte objects,
# though, you get the 422 right away because no Expect header is sent
# with zero-byte PUT. The second status in the tuple should not be
# consumed, it's just there to make the FakeStatus treat the first as
# an expect status, but we'll make it something other than a 422 so
# that if it is consumed then the test should fail.
codes = [FakeStatus((422, 200))
for _junk in range(self.replicas())]
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 422)
def test_PUT_if_none_match(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = '*'
req.headers['content-length'] = '0'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_if_none_match_denied(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = '*'
req.headers['content-length'] = '0'
with set_http_connect(201, 412, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 412)
def test_PUT_if_none_match_not_star(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = 'somethingelse'
req.headers['content-length'] = '0'
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
def test_PUT_connect_exceptions(self):
object_ring = self.app.get_object_ring(None)
self.app.sort_nodes = lambda n, *args, **kwargs: n # disable shuffle
def test_status_map(statuses, expected):
self.app.error_limiter.stats.clear()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body=b'test body')
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, expected)
base_status = [201] * 3
# test happy path
test_status_map(list(base_status), 201)
for i in range(3):
self.assertEqual(node_error_count(
self.app, object_ring.devs[i]), 0)
# single node errors and test isolation
for i in range(3):
status_list = list(base_status)
status_list[i] = 503
test_status_map(status_list, 201)
for j in range(3):
self.assertEqual(node_error_count(
self.app, object_ring.devs[j]), 1 if j == i else 0)
# connect errors
test_status_map((201, Timeout(), 201, 201), 201)
self.assertEqual(node_error_count(
self.app, object_ring.devs[1]), 1)
test_status_map((Exception('kaboom!'), 201, 201, 201), 201)
self.assertEqual(node_error_count(
self.app, object_ring.devs[0]), 1)
# expect errors
test_status_map((201, 201, (503, None), 201), 201)
self.assertEqual(node_error_count(
self.app, object_ring.devs[2]), 1)
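        # a 507 on expect error-limits the node outright: its error count
        # is bumped past the suppression limit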
test_status_map(((507, None), 201, 201, 201), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[0]),
self.app.error_limiter.suppression_limit + 1)
# response errors
test_status_map(((100, Timeout()), 201, 201), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[0]), 1)
test_status_map((201, 201, (100, Exception())), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[2]), 1)
test_status_map((201, (100, 507), 201), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[1]),
self.app.error_limiter.suppression_limit + 1)
def test_PUT_connect_exception_with_unicode_path(self):
expected = 201
statuses = (
Exception('Connection refused: Please insert ten dollars'),
201, 201, 201)
req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
method='PUT',
body=b'life is utf-gr8')
self.app.logger.clear()
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, expected)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertFalse(log_lines[1:])
self.assertIn('ERROR with Object server', log_lines[0])
self.assertIn(quote(req.swift_entity_path), log_lines[0])
self.assertIn('re: Expect: 100-continue', log_lines[0])
def test_PUT_get_expect_errors_with_unicode_path(self):
def do_test(statuses):
req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
method='PUT',
body=b'life is utf-gr8')
self.app.logger.clear()
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertFalse(log_lines[1:])
return log_lines
log_lines = do_test((201, (507, None), 201, 201))
self.assertIn('ERROR Insufficient Storage', log_lines[0])
log_lines = do_test((201, (503, None), 201, 201))
self.assertIn('ERROR 503 Expect: 100-continue From Object Server',
log_lines[0])
def test_PUT_send_exception_with_unicode_path(self):
def do_test(exc):
conns = set()
def capture_send(conn, data):
conns.add(conn)
if len(conns) == 2:
raise exc
req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
method='PUT',
body=b'life is utf-gr8')
self.app.logger.clear()
with set_http_connect(201, 201, 201, give_send=capture_send):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertFalse(log_lines[1:])
self.assertIn('ERROR with Object server', log_lines[0])
self.assertIn(quote(req.swift_entity_path), log_lines[0])
self.assertIn('Trying to write to', log_lines[0])
do_test(Exception('Exception while sending data on connection'))
do_test(ChunkWriteTimeout())
def test_PUT_final_response_errors_with_unicode_path(self):
def do_test(statuses):
req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
method='PUT',
body=b'life is utf-gr8')
self.app.logger.clear()
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertFalse(log_lines[1:])
return req, log_lines
req, log_lines = do_test((201, (100, Exception('boom')), 201))
self.assertIn('ERROR with Object server', log_lines[0])
if six.PY3:
self.assertIn(req.path, log_lines[0])
else:
self.assertIn(req.path.decode('utf-8'), log_lines[0])
self.assertIn('Trying to get final status of PUT', log_lines[0])
req, log_lines = do_test((201, (100, Timeout()), 201))
self.assertIn('ERROR with Object server', log_lines[0])
if six.PY3:
self.assertIn(req.path, log_lines[0])
else:
self.assertIn(req.path.decode('utf-8'), log_lines[0])
self.assertIn('Trying to get final status of PUT', log_lines[0])
req, log_lines = do_test((201, (100, 507), 201))
self.assertIn('ERROR Insufficient Storage', log_lines[0])
req, log_lines = do_test((201, (100, 500), 201))
if six.PY3:
# We allow the b'' in logs because we want to see bad characters.
self.assertIn(
"ERROR 500 b'' Trying to PUT /v1/AUTH_kilroy/%ED%88%8E/"
"%E9%90%89 From Object Server", log_lines[0])
self.assertIn(req.path, log_lines[0])
else:
self.assertIn(
'ERROR 500 Trying to PUT /v1/AUTH_kilroy/%ED%88%8E/%E9%90%89 '
'From Object Server', log_lines[0])
self.assertIn(req.path.decode('utf-8'), log_lines[0])
def test_DELETE_errors(self):
# verify logged errors with and without non-ascii characters in path
def do_test(path, statuses):
req = swob.Request.blank('/v1' + path,
method='DELETE',
body=b'life is utf-gr8')
self.app.logger.clear()
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertFalse(log_lines[1:])
return req, log_lines
req, log_lines = do_test('/AUTH_kilroy/ascii/ascii',
(201, 500, 201, 201))
self.assertIn('Trying to DELETE', log_lines[0])
if six.PY3:
self.assertIn(req.swift_entity_path, log_lines[0])
else:
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn(' From Object Server', log_lines[0])
req, log_lines = do_test('/AUTH_kilroy/%ED%88%8E/%E9%90%89',
(201, 500, 201, 201))
self.assertIn('Trying to DELETE', log_lines[0])
if six.PY3:
self.assertIn(req.swift_entity_path, log_lines[0])
else:
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn(' From Object Server', log_lines[0])
req, log_lines = do_test('/AUTH_kilroy/ascii/ascii',
(201, 507, 201, 201))
self.assertIn('ERROR Insufficient Storage', log_lines[0])
req, log_lines = do_test('/AUTH_kilroy/%ED%88%8E/%E9%90%89',
(201, 507, 201, 201))
self.assertIn('ERROR Insufficient Storage', log_lines[0])
req, log_lines = do_test('/AUTH_kilroy/ascii/ascii',
(201, Exception(), 201, 201))
self.assertIn('Trying to DELETE', log_lines[0])
if six.PY3:
self.assertIn(req.swift_entity_path, log_lines[0])
else:
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn('ERROR with Object server', log_lines[0])
req, log_lines = do_test('/AUTH_kilroy/%ED%88%8E/%E9%90%89',
(201, Exception(), 201, 201))
self.assertIn('Trying to DELETE', log_lines[0])
if six.PY3:
self.assertIn(req.swift_entity_path, log_lines[0])
else:
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn('ERROR with Object server', log_lines[0])
def test_DELETE_with_write_affinity(self):
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_handoff_delete_count = self.replicas() // 2
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
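        # with write affinity the DELETE also goes to handoff_delete_count
        # handoff nodes, so expect one extra backend request here and two
        # extra once the count is raised below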
codes = [204, 204, 404, 204]
with mocked_http_conn(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
codes = [204, 404, 404, 204]
with mocked_http_conn(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
policy_conf.write_affinity_handoff_delete_count = 2
codes = [204, 204, 404, 204, 404]
with mocked_http_conn(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
codes = [204, 404, 404, 204, 204]
with mocked_http_conn(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_PUT_error_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise IOError('error message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body=b'test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_PUT_chunkreadtimeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.ChunkReadTimeout()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body=b'test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 408)
def test_PUT_timeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise Timeout()
conns = []
def capture_expect(conn):
# stash connections so that we can verify they all get closed
conns.append(conn)
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body=b'test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201, give_expect=capture_expect):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
self.assertEqual(self.replicas(), len(conns))
for conn in conns:
self.assertTrue(conn.closed)
def test_PUT_insufficient_data_from_client(self):
class FakeReader(object):
def read(self, size):
raise Timeout()
conns = []
def capture_expect(conn):
# stash connections so that we can verify they all get closed
conns.append(conn)
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='7 bytes')
req.headers['content-length'] = '99'
with set_http_connect(201, 201, 201, give_expect=capture_expect):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
warning_lines = self.app.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_lines))
self.assertIn('Client disconnected without sending enough data',
warning_lines[0])
self.assertEqual(self.replicas(), len(conns))
for conn in conns:
self.assertTrue(conn.closed)
def test_PUT_exception_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise Exception('exception message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body=b'test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 500)
def test_GET_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect(200, headers={'Connection': 'close'}):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertIn('Accept-Ranges', resp.headers)
self.assertNotIn('Connection', resp.headers)
def test_GET_slow_read(self):
self.app.recoverable_node_timeout = 0.01
self.app.client_timeout = 0.1
self.app.object_chunk_size = 10
body = b'test'
etag = md5(body, usedforsecurity=False).hexdigest()
headers = {
'Etag': etag,
'Content-Length': len(body),
'X-Timestamp': Timestamp(self.ts()).normal,
}
responses = [(200, body, headers)] * 2
status_codes, body_iter, headers = zip(*responses)
req = swift.common.swob.Request.blank('/v1/a/c/o')
# make the first response slow...
read_sleeps = [0.1, 0]
with mocked_http_conn(*status_codes, body_iter=body_iter,
headers=headers, slow=read_sleeps) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
_ = resp.body
self.assertEqual(len(log.requests), 2)
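        # the slow first read exceeded recoverable_node_timeout, so the
        # proxy resumed the GET from a second node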
def make_key(r):
r['device'] = r['path'].split('/')[1]
return '%(ip)s:%(port)s/%(device)s' % r
        # the first node got its error count incremented
expected_error_limiting = {
make_key(log.requests[0]): {
'errors': 1,
'last_error': mock.ANY,
}
}
actual = {}
for n in self.app.get_object_ring(int(self.policy)).devs:
node_key = self.app.error_limiter.node_key(n)
stats = self.app.error_limiter.stats.get(node_key) or {}
if stats:
actual[self.app.error_limiter.node_key(n)] = stats
self.assertEqual(actual, expected_error_limiting)
for read_line in self.app.logger.get_lines_for_level('error'):
self.assertIn("Trying to read object during GET (retrying)",
read_line)
self.assertEqual(
len(self.logger.logger.records['ERROR']), 1,
'Expected 1 ERROR lines, got %r' % (
self.logger.logger.records['ERROR'], ))
def test_GET_resuming_ignores_416(self):
        # verify that a resuming getter will not try to use the content of a
        # 416 response (because its etag will not match that of the first
        # response)
self.app.recoverable_node_timeout = 0.01
self.app.client_timeout = 0.1
self.app.object_chunk_size = 10
body = b'length 8'
body_short = b'four'
body_416 = b'<html><h1>Requested Range Not Satisfiable</h1>' \
b'<p>The Range requested is not available.</p></html>'
etag = md5(body, usedforsecurity=False).hexdigest()
etag_short = md5(body_short, usedforsecurity=False).hexdigest()
headers_206 = {
'Etag': etag,
'Content-Length': len(body),
'X-Timestamp': Timestamp(self.ts()).normal,
'Content-Range': 'bytes 7-8/8'
}
headers_416 = {
# note: 416 when applying the same range implies different object
# length and therefore different etag
'Etag': etag_short,
'Content-Length': len(body_416),
'X-Timestamp': Timestamp(self.ts()).normal,
'Content-Range': 'bytes */4'
}
req = swift.common.swob.Request.blank(
'/v1/a/c/o', headers={'Range': 'bytes=7-8'})
# make the first response slow...
read_sleeps = [0.1, 0]
with mocked_http_conn(206, 416, 206, body_iter=[body, body_416, body],
headers=[headers_206, headers_416, headers_206],
slow=read_sleeps) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 206)
resp_body = resp.body
self.assertEqual(b'length 8', resp_body)
self.assertEqual(len(log.requests), 3)
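        # every attempt asked for the client's original range; the 416 body
        # was discarded because its etag differed from the first 206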
self.assertEqual('bytes=7-8', log.requests[0]['headers']['Range'])
self.assertEqual('bytes=7-8', log.requests[1]['headers']['Range'])
self.assertEqual('bytes=7-8', log.requests[2]['headers']['Range'])
def test_GET_resuming(self):
self.app.recoverable_node_timeout = 0.01
self.app.client_timeout = 0.1
self.app.object_chunk_size = 10
body = b'length 8'
etag = md5(body, usedforsecurity=False).hexdigest()
headers_200 = {
'Etag': etag,
'Content-Length': len(body),
'X-Timestamp': Timestamp(self.ts()).normal,
}
headers_206 = {
            # note: 'X-Backend-Ignore-Range-If-Metadata-Present' in the
            # request means the 200 response did not evaluate the Range, so
            # the proxy adjusts the requested backend range accordingly
'Etag': etag,
'Content-Length': len(body),
'X-Timestamp': Timestamp(self.ts()).normal,
'Content-Range': 'bytes 0-7/8'
}
req = swift.common.swob.Request.blank(
'/v1/a/c/o',
headers={'Range': 'bytes=9-10, 20-30',
'X-Backend-Ignore-Range-If-Metadata-Present':
'X-Static-Large-Object'})
# make the first 2 responses slow...
read_sleeps = [0.1, 0.1, 0]
with mocked_http_conn(200, 206, 206, body_iter=[body, body, body],
headers=[headers_200, headers_206, headers_206],
slow=read_sleeps) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
resp_body = resp.body
self.assertEqual(b'length 8', resp_body)
self.assertEqual(len(log.requests), 3)
# NB: original range is not satisfiable but is ignored
self.assertEqual('bytes=9-10, 20-30',
log.requests[0]['headers']['Range'])
self.assertIn('X-Backend-Ignore-Range-If-Metadata-Present',
log.requests[0]['headers'])
# backend Range is updated to something that is satisfiable
self.assertEqual('bytes=0-7,20-30',
log.requests[1]['headers']['Range'])
self.assertNotIn('X-Backend-Ignore-Range-If-Metadata-Present',
log.requests[1]['headers'])
self.assertEqual('bytes=0-7,20-30',
log.requests[2]['headers']['Range'])
self.assertNotIn('X-Backend-Ignore-Range-If-Metadata-Present',
log.requests[2]['headers'])
def test_GET_transfer_encoding_chunked(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect(200, headers={'transfer-encoding': 'chunked'}):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['Transfer-Encoding'], 'chunked')
def _test_removes_swift_bytes(self, method):
req = swift.common.swob.Request.blank('/v1/a/c/o', method=method)
with set_http_connect(
200, headers={'content-type': 'image/jpeg; swift_bytes=99'}):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['Content-Type'], 'image/jpeg')
def test_GET_removes_swift_bytes(self):
self._test_removes_swift_bytes('GET')
def test_HEAD_removes_swift_bytes(self):
self._test_removes_swift_bytes('HEAD')
def test_GET_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
self.app.logger.txn_id = req.environ['swift.trans_id'] = 'my-txn-id'
stdout = StringIO()
with set_http_connect(503, 200), \
mock.patch('sys.stdout', stdout):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
for line in stdout.getvalue().splitlines():
self.assertIn('my-txn-id', line)
self.assertIn('From Object Server', stdout.getvalue())
def test_GET_handoff(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [503] * self.obj_ring.replicas + [200]
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_GET_not_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [404] * (self.obj_ring.replicas +
self.obj_ring.max_more_nodes)
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_GET_primaries_explode(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [Exception('kaboom!')] * self.obj_ring.replicas + (
[404] * self.obj_ring.max_more_nodes)
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_GET_primaries_timeout(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [Timeout()] * self.obj_ring.replicas + (
[404] * self.obj_ring.max_more_nodes)
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
    def test_HEAD_error_limit_suppression_count(self):
def do_test(primary_codes, expected, clear_stats=True):
if clear_stats:
self.app.error_limiter.stats.clear()
random.shuffle(primary_codes)
handoff_codes = [404] * self.obj_ring.max_more_nodes
with set_http_connect(*primary_codes + handoff_codes):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='HEAD')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, expected)
policy_opts = self.app.get_policy_options(None)
policy_opts.rebalance_missing_suppression_count = 1
# even with disks unmounted you can run with suppression_count = 1
do_test([507, 404, 404], 404)
# error limiting can make things wonky
do_test([404, 404], 404, clear_stats=False)
        # and it gets a little dicey rebooting nodes
do_test([Timeout(), 404], 503, clear_stats=False)
do_test([507, Timeout(), 404], 503)
# unless you turn it off
policy_opts.rebalance_missing_suppression_count = 0
do_test([507, Timeout(), 404], 404)
def test_GET_primaries_error_during_rebalance(self):
def do_test(primary_codes, expected, include_timestamp=False):
random.shuffle(primary_codes)
handoff_codes = [404] * self.obj_ring.max_more_nodes
headers = None
if include_timestamp:
headers = [{'X-Backend-Timestamp': '123.456'}] * 3
headers.extend({} for _ in handoff_codes)
with set_http_connect(*primary_codes + handoff_codes,
headers=headers):
req = swift.common.swob.Request.blank('/v1/a/c/o')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, expected)
        # with two out of three backend errors a client should retry
do_test([Timeout(), Exception('kaboom!'), 404], 503)
# unless there's a timestamp associated
do_test([Timeout(), Exception('kaboom!'), 404], 404,
include_timestamp=True)
# when there's more 404s, we trust it more
do_test([Timeout(), 404, 404], 404)
# unless we explicitly *don't* want to trust it
policy_opts = self.app.get_policy_options(None)
policy_opts.rebalance_missing_suppression_count = 2
do_test([Timeout(), 404, 404], 503)
# overloaded primary after double rebalance
# ... opts should increase rebalance_missing_suppression_count
policy_opts.rebalance_missing_suppression_count = 2
do_test([Timeout(), 404, 404], 503)
# two primaries out, but no rebalance
# ... default is fine for tombstones
policy_opts.rebalance_missing_suppression_count = 1
do_test([Timeout(), Exception('kaboom!'), 404], 404,
include_timestamp=True)
# ... but maybe not ideal for missing names
# (N.B. 503 isn't really a BAD response here)
do_test([Timeout(), Exception('kaboom!'), 404], 503)
# still ... ops might think they should tune it down
policy_opts.rebalance_missing_suppression_count = 0
do_test([Timeout(), Exception('kaboom!'), 404], 404)
# and we could maybe leave it like this for the next rebalance
do_test([Timeout(), 404, 404], 404)
# ... but it gets bad when faced with timeouts, b/c we can't trust a
# single primary 404 response during rebalance
do_test([Timeout(), Timeout(), 404], 404)
# ops needs to fix configs to get the 503
policy_opts.rebalance_missing_suppression_count = 1
do_test([Timeout(), Timeout(), 404], 503)
def test_GET_primaries_mixed_explode_and_timeout(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
primaries = []
for i in range(self.obj_ring.replicas):
if i % 2:
primaries.append(Timeout())
else:
primaries.append(Exception('kaboom!'))
codes = primaries + [404] * self.obj_ring.max_more_nodes
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_primary_returns_some_nonsense_timestamp(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
# an un-handled ValueError in _make_node_request should just continue
# to the next node rather than hang the request
headers = [{'X-Backend-Timestamp': 'not-a-timestamp'}, {}]
codes = [200, 200]
with quiet_eventlet_exceptions(), set_http_connect(
*codes, headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_GET_not_found_when_404_newer(self):
        # if the proxy receives a 404 it keeps trying other nodes, up to the
        # max node count, in hopes of finding the object, but if the 404 is
        # more recent than a 200 it should ignore the 200 and return 404
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [404] * self.obj_ring.replicas + \
[200] * self.obj_ring.max_more_nodes
ts_iter = iter([2] * self.obj_ring.replicas +
[1] * self.obj_ring.max_more_nodes)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_GET_x_newest_not_found_when_404_newer(self):
        # if the proxy receives a 404 it keeps trying other nodes, up to the
        # max node count, in hopes of finding the object, but if the 404 is
        # more recent than a 200 it should ignore the 200 and return 404
req = swift.common.swob.Request.blank('/v1/a/c/o',
headers={'X-Newest': 'true'})
codes = ([200] +
[404] * self.obj_ring.replicas +
[200] * (self.obj_ring.max_more_nodes - 1))
ts_iter = iter([1] +
[2] * self.obj_ring.replicas +
[1] * (self.obj_ring.max_more_nodes - 1))
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_PUT_delete_at(self):
t = str(int(time.time() + 100))
req = swob.Request.blank('/v1/a/c/o', method='PUT', body=b'',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
put_headers = []
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
if method == 'PUT':
put_headers.append(headers)
codes = [201] * self.obj_ring.replicas
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
for given_headers in put_headers:
self.assertEqual(given_headers.get('X-Delete-At'), t)
self.assertIn('X-Delete-At-Host', given_headers)
self.assertIn('X-Delete-At-Device', given_headers)
self.assertIn('X-Delete-At-Partition', given_headers)
self.assertIn('X-Delete-At-Container', given_headers)
def test_PUT_converts_delete_after_to_delete_at(self):
req = swob.Request.blank('/v1/a/c/o', method='PUT', body=b'',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '60'})
put_headers = []
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
if method == 'PUT':
put_headers.append(headers)
codes = [201] * self.obj_ring.replicas
t = time.time()
with set_http_connect(*codes, give_connect=capture_headers):
with mock.patch('time.time', lambda: t):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
expected_delete_at = str(int(t) + 60)
for given_headers in put_headers:
self.assertEqual(given_headers.get('X-Delete-At'),
expected_delete_at)
self.assertIn('X-Delete-At-Host', given_headers)
self.assertIn('X-Delete-At-Device', given_headers)
self.assertIn('X-Delete-At-Partition', given_headers)
self.assertIn('X-Delete-At-Container', given_headers)
def test_container_sync_put_x_timestamp_not_found(self):
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
self.app.container_info['storage_policy'] = policy_index
put_timestamp = utils.Timestamp.now().normal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
codes = [201] * self.obj_ring.replicas
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_container_sync_put_x_timestamp_match(self):
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
self.app.container_info['storage_policy'] = policy_index
put_timestamp = utils.Timestamp.now().normal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
ts_iter = itertools.repeat(put_timestamp)
codes = [409] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_older(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
self.app.container_info['storage_policy'] = policy_index
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = itertools.repeat(next(ts).internal)
codes = [409] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_newer(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
orig_timestamp = next(ts).internal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = itertools.repeat(orig_timestamp)
codes = [201] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_put_x_timestamp_conflict(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = iter([next(ts).internal, None, None])
codes = [409] + [201] * (self.obj_ring.replicas - 1)
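        # the lone 409 reports a newer on-disk timestamp; together with the
        # other nodes' 201s the PUT is acknowledged with 202 Accepted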
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_put_x_timestamp_conflict_with_missing_backend_timestamp(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = iter([None, None, None])
codes = [409] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_put_x_timestamp_conflict_with_other_weird_success_response(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = iter([next(ts).internal, None, None])
codes = [409] + [(201, 'notused')] * (self.obj_ring.replicas - 1)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_put_x_timestamp_conflict_with_if_none_match(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'If-None-Match': '*',
'X-Timestamp': next(ts).internal})
ts_iter = iter([next(ts).internal, None, None])
codes = [409] + [(412, 'notused')] * (self.obj_ring.replicas - 1)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 412)
def test_container_sync_put_x_timestamp_race(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
put_timestamp = next(ts).internal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
            # the object nodes respond 409 because another in-flight request
            # finished and now the on-disk timestamp equals the request's.
put_ts = [put_timestamp] * self.obj_ring.replicas
codes = [409] * self.obj_ring.replicas
ts_iter = iter(put_ts)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_unsynced_race(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
put_timestamp = next(ts).internal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
# only one in-flight request finished
put_ts = [None] * (self.obj_ring.replicas - 1)
put_resp = [201] * (self.obj_ring.replicas - 1)
put_ts += [put_timestamp]
put_resp += [409]
ts_iter = iter(put_ts)
codes = put_resp
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_x_timestamp_not_overridden(self):
def do_test(method, base_headers, resp_code):
# no given x-timestamp
req = swob.Request.blank(
'/v1/a/c/o', method=method, headers=base_headers)
codes = [resp_code] * self.replicas()
with mocked_http_conn(*codes) as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, resp_code)
self.assertEqual(self.replicas(), len(fake_conn.requests))
for req in fake_conn.requests:
self.assertIn('X-Timestamp', req['headers'])
# check value can be parsed as valid timestamp
Timestamp(req['headers']['X-Timestamp'])
# given x-timestamp is retained
def do_check(ts):
headers = dict(base_headers)
headers['X-Timestamp'] = ts.internal
req = swob.Request.blank(
'/v1/a/c/o', method=method, headers=headers)
codes = [resp_code] * self.replicas()
with mocked_http_conn(*codes) as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, resp_code)
self.assertEqual(self.replicas(), len(fake_conn.requests))
for req in fake_conn.requests:
self.assertEqual(ts.internal,
req['headers']['X-Timestamp'])
do_check(Timestamp.now())
do_check(Timestamp.now(offset=123))
# given x-timestamp gets sanity checked
headers = dict(base_headers)
headers['X-Timestamp'] = 'bad timestamp'
req = swob.Request.blank(
'/v1/a/c/o', method=method, headers=headers)
with mocked_http_conn() as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertIn(b'X-Timestamp should be a UNIX timestamp ',
resp.body)
do_test('PUT', {'Content-Length': 0}, 200)
do_test('DELETE', {}, 204)
@patch_policies(
[StoragePolicy(0, '1-replica', True),
StoragePolicy(1, '4-replica', False),
StoragePolicy(2, '8-replica', False),
StoragePolicy(3, '15-replica', False)],
fake_ring_args=[
{'replicas': 1}, {'replicas': 4}, {'replicas': 8}, {'replicas': 15}])
class TestReplicatedObjControllerVariousReplicas(CommonObjectControllerMixin,
unittest.TestCase):
controller_cls = obj.ReplicatedObjectController
def test_DELETE_with_write_affinity(self):
policy_index = 1
self.policy = POLICIES[policy_index]
policy_conf = self.app.get_policy_options(self.policy)
self.app.container_info['storage_policy'] = policy_index
policy_conf.write_affinity_handoff_delete_count = \
self.replicas(self.policy) // 2
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204, 204, 404, 404, 204, 204]
with mocked_http_conn(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
policy_conf.write_affinity_handoff_delete_count = 1
codes = [204, 204, 404, 404, 204]
with mocked_http_conn(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
@patch_policies()
class TestReplicatedObjControllerMimePutter(BaseObjectControllerMixin,
unittest.TestCase):
# tests specific to PUTs using a MimePutter
expect_headers = {
'X-Obj-Metadata-Footer': 'yes'
}
def setUp(self):
super(TestReplicatedObjControllerMimePutter, self).setUp()
# force use of a MimePutter
self.app.use_put_v1 = False
def test_PUT_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=b'')
codes = [503] * self.replicas()
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def _test_PUT_with_footers(self, test_body=b''):
# verify that when footers are required the PUT body is multipart
# and the footers are appended
footers_callback = make_footers_callback(test_body)
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
environ=env)
req.body = test_body
# send bogus Etag header to differentiate from footer value
req.headers['Etag'] = 'header_etag'
codes = [201] * self.replicas()
put_requests = defaultdict(
lambda: {'headers': None, 'chunks': [], 'connection': None})
def capture_body(conn, chunk):
put_requests[conn.connection_id]['chunks'].append(chunk)
put_requests[conn.connection_id]['connection'] = conn
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
conn_id = kwargs['connection_id']
put_requests[conn_id]['headers'] = headers
resp_headers = {
'Etag': '"resp_etag"',
# NB: ignored!
'Some-Header': 'Four',
}
with set_http_connect(*codes, expect_headers=self.expect_headers,
give_send=capture_body,
give_connect=capture_headers,
headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
timestamps = {captured_req['headers']['x-timestamp']
for captured_req in put_requests.values()}
self.assertEqual(1, len(timestamps), timestamps)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': '0',
'Etag': 'resp_etag',
'Last-Modified': time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(math.ceil(float(timestamps.pop())))),
})
for connection_id, info in put_requests.items():
body = unchunk_body(b''.join(info['chunks']))
headers = info['headers']
boundary = headers['X-Backend-Obj-Multipart-Mime-Boundary']
self.assertTrue(boundary is not None,
"didn't get boundary for conn %r" % (
connection_id,))
self.assertEqual('chunked', headers['Transfer-Encoding'])
self.assertEqual('100-continue', headers['Expect'])
self.assertEqual('yes', headers['X-Backend-Obj-Metadata-Footer'])
self.assertNotIn('X-Backend-Obj-Multiphase-Commit', headers)
self.assertEqual('header_etag', headers['Etag'])
# email.parser.FeedParser doesn't know how to take a multipart
# message and boundary together and parse it; it only knows how
# to take a string, parse the headers, and figure out the
# boundary on its own.
parser = EmailFeedParser()
parser.feed(
("Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n" %
boundary).encode('ascii'))
parser.feed(body)
message = parser.close()
self.assertTrue(message.is_multipart()) # sanity check
mime_parts = message.get_payload()
# notice, no commit confirmation
self.assertEqual(len(mime_parts), 2)
obj_part, footer_part = mime_parts
self.assertEqual(obj_part['X-Document'], 'object body')
self.assertEqual(test_body, obj_part.get_payload(decode=True))
# validate footer metadata
self.assertEqual(footer_part['X-Document'], 'object metadata')
footer_metadata = json.loads(footer_part.get_payload())
self.assertTrue(footer_metadata)
expected = {}
footers_callback(expected)
self.assertDictEqual(expected, footer_metadata)
self.assertTrue(info['connection'].closed)
def test_PUT_with_body_and_footers(self):
self._test_PUT_with_footers(test_body=b'asdf')
def test_PUT_with_no_body_and_footers(self):
self._test_PUT_with_footers()
@contextmanager
def capture_http_requests(get_response):
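    # patch the proxy's backend HTTP connections so that every request is
    # answered by get_response() and recorded in the yielded ConnectionLog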
class FakeConn(object):
def __init__(self, req):
self.req = req
self.resp = None
self.path = "/"
self.closed = False
def getresponse(self):
self.resp = get_response(self.req)
return self.resp
def putrequest(self, method, path, **kwargs):
pass
def putheader(self, k, v):
pass
def endheaders(self):
pass
def close(self):
self.closed = True
class ConnectionLog(object):
def __init__(self):
self.connections = []
def __len__(self):
return len(self.connections)
def __getitem__(self, i):
return self.connections[i]
def __iter__(self):
return iter(self.connections)
def __call__(self, ip, port, method, path, headers, qs, ssl):
req = {
'ip': ip,
'port': port,
'method': method,
'path': path,
'headers': headers,
'qs': qs,
'ssl': ssl,
}
conn = FakeConn(req)
self.connections.append(conn)
return conn
fake_conn = ConnectionLog()
with mock.patch('swift.common.bufferedhttp.http_connect_raw',
new=fake_conn):
yield fake_conn
class ECObjectControllerMixin(CommonObjectControllerMixin):
# Add a few helper methods for EC tests.
def _make_ec_archive_bodies(self, test_body, policy=None):
policy = policy or self.policy
return encode_frag_archive_bodies(policy, test_body)
def _make_ec_object_stub(self, pattern='test', policy=None,
timestamp=None):
policy = policy or self.policy
if isinstance(pattern, six.text_type):
pattern = pattern.encode('utf-8')
test_body = pattern * policy.ec_segment_size
test_body = test_body[:-random.randint(1, 1000)]
return make_ec_object_stub(test_body, policy, timestamp)
def _fake_ec_node_response(self, node_frags):
return fake_ec_node_response(node_frags, self.policy)
def test_GET_with_duplicate_but_sufficient_frag_indexes(self):
obj1 = self._make_ec_object_stub()
        # the proxy should ignore duplicated frag indexes and keep searching
        # for a set of unique indexes, finding the last one on a handoff
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj1, 'frag': 0}, # duplicate frag
{'obj': obj1, 'frag': 1},
{'obj': obj1, 'frag': 1}, # duplicate frag
{'obj': obj1, 'frag': 2},
{'obj': obj1, 'frag': 2}, # duplicate frag
{'obj': obj1, 'frag': 3},
{'obj': obj1, 'frag': 3}, # duplicate frag
{'obj': obj1, 'frag': 4},
{'obj': obj1, 'frag': 4}, # duplicate frag
{'obj': obj1, 'frag': 10},
{'obj': obj1, 'frag': 11},
{'obj': obj1, 'frag': 12},
{'obj': obj1, 'frag': 13},
] * self.policy.ec_duplication_factor
node_frags.append({'obj': obj1, 'frag': 5}) # first handoff
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(
resp.body, usedforsecurity=False).hexdigest(), obj1['etag'])
# expect a request to all primaries plus one handoff
self.assertEqual(self.replicas() + 1, len(log))
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), self.policy.ec_ndata)
def test_GET_with_duplicate_but_insufficient_frag_indexes(self):
obj1 = self._make_ec_object_stub()
        # the proxy should ignore duplicated frag indexes and keep searching
        # for a set of unique indexes, but fail to find one
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj1, 'frag': 0}, # duplicate frag
{'obj': obj1, 'frag': 1},
{'obj': obj1, 'frag': 1}, # duplicate frag
{'obj': obj1, 'frag': 2},
{'obj': obj1, 'frag': 2}, # duplicate frag
{'obj': obj1, 'frag': 3},
{'obj': obj1, 'frag': 3}, # duplicate frag
{'obj': obj1, 'frag': 4},
{'obj': obj1, 'frag': 4}, # duplicate frag
{'obj': obj1, 'frag': 10},
{'obj': obj1, 'frag': 11},
{'obj': obj1, 'frag': 12},
{'obj': obj1, 'frag': 13},
]
        # ... and the rest are 404s; the number of requests is limited by
        # request_count (2 * replicas by default) rather than by
        # max_extra_requests, because retries for 404 responses happen in
        # ResumingGetter
node_frags += [[]] * (self.replicas() * 2 - len(node_frags))
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
# expect a request to all nodes
self.assertEqual(2 * self.replicas(), len(log))
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), self.policy.ec_ndata - 1)
@patch_policies(with_ec_default=True)
class TestECObjController(ECObjectControllerMixin, unittest.TestCase):
container_info = {
'status': 200,
'read_acl': None,
'write_acl': None,
'sync_key': None,
'versions': None,
'storage_policy': '0',
}
controller_cls = obj.ECObjectController
def _add_frag_index(self, index, headers):
# helper method to add a frag index header to an existing header dict
hdr_name = 'X-Object-Sysmeta-Ec-Frag-Index'
return dict(list(headers.items()) + [(hdr_name, index)])
def test_determine_chunk_destinations(self):
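        # putters with a node_index should map to that node's backend
        # fragment index; putters without one (handoffs) get one of the
        # unclaimed indexes, and the putters themselves are never mutated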
class FakePutter(object):
def __init__(self, index):
self.node_index = index
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
# create a dummy list of putters, check no handoffs
putters = []
expected = {}
for index in range(self.policy.object_ring.replica_count):
p = FakePutter(index)
putters.append(p)
expected[p] = self.policy.get_backend_index(index)
got = controller._determine_chunk_destinations(putters, self.policy)
self.assertEqual(got, expected)
def _test_one_handoff(index):
with mock.patch.object(putters[index], 'node_index', None):
got = controller._determine_chunk_destinations(
putters, self.policy)
self.assertEqual(got, expected)
# Check that we don't mutate the putter
self.assertEqual([p.node_index for p in putters],
[None if i == index else i
for i, _ in enumerate(putters)])
        # now let's make a handoff at the end
_test_one_handoff(self.policy.object_ring.replica_count - 1)
        # now let's make a handoff at the start
_test_one_handoff(0)
        # now let's make a handoff in the middle
_test_one_handoff(2)
        # now let's make all of them handoffs
for index in range(self.policy.object_ring.replica_count):
putters[index].node_index = None
got = controller._determine_chunk_destinations(putters, self.policy)
self.assertEqual(sorted(got, key=lambda p: id(p)),
sorted(expected, key=lambda p: id(p)))
def test_GET_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
get_statuses = [200] * self.policy.ec_ndata
get_hdrs = [{
'Connection': 'close',
'X-Object-Sysmeta-Ec-Scheme': self.policy.ec_scheme_description,
}] * self.policy.ec_ndata
with set_http_connect(*get_statuses, headers=get_hdrs):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertIn('Accept-Ranges', resp.headers)
self.assertNotIn('Connection', resp.headers)
self.assertFalse([h for h in resp.headers
if h.lower().startswith('x-object-sysmeta-ec-')])
def test_GET_disconnect(self):
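        # if the client disconnects mid-download while some backends are
        # responding slowly, the proxy should error-limit the slow nodes
        # and warn about each aborted fragment read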
self.app.recoverable_node_timeout = 0.01
self.app.client_timeout = 0.1
# Before, we used the default 64k chunk size, so the entire ~16k test
# data would come in the first chunk, and the generator should
# cleanly exit by the time we reiterate() the response.
self.app.object_chunk_size = 10
segment_size = self.policy.ec_segment_size
test_data = (b'test' * segment_size)[:-743]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
headers = {
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Content-Length': len(test_data),
'X-Timestamp': Timestamp(self.ts()).normal,
}
num_slow = 4
responses = [
(200, SlowBody(body, 0.1 if i < num_slow else 0.0),
self._add_frag_index(i, headers))
for i, body in enumerate(ec_archive_bodies)
] * self.policy.ec_duplication_factor
status_codes, body_iter, headers = zip(*responses)
req = swift.common.swob.Request.blank('/v1/a/c/o')
with mocked_http_conn(*status_codes, body_iter=body_iter,
headers=headers) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
resp.app_iter.close()
self.assertEqual(len(log.requests),
self.policy.ec_ndata + num_slow)
def make_key(r):
r['device'] = r['path'].split('/')[1]
return '%(ip)s:%(port)s/%(device)s' % r
# the first four slow nodes get errors incr'd
expected_error_limiting = {
make_key(r): {
'errors': 1,
'last_error': mock.ANY,
}
for r in log.requests[:4]
}
actual = {}
for n in self.app.get_object_ring(int(self.policy)).devs:
node_key = self.app.error_limiter.node_key(n)
stats = self.app.error_limiter.stats.get(node_key) or {}
if stats:
actual[self.app.error_limiter.node_key(n)] = stats
self.assertEqual(actual, expected_error_limiting)
expected = ["Client disconnected on read of EC frag '/a/c/o'"] * 10
self.assertEqual(
self.app.logger.get_lines_for_level('warning'),
expected)
for read_line in self.app.logger.get_lines_for_level('error'):
self.assertIn("Trying to read EC fragment during GET (retrying)",
read_line)
self.assertEqual(
len(self.logger.logger.records['ERROR']), 4,
'Expected 4 ERROR lines, got %r' % (
self.logger.logger.records['ERROR'], ))
def test_GET_not_found_when_404_newer(self):
        # if the proxy receives a 404 it keeps waiting on other connections,
        # up to the maximum number of nodes, in hopes of finding the object;
        # but if the 404 is more recent than a 200 it should ignore the 200
        # and return 404
req = swift.common.swob.Request.blank('/v1/a/c/o')
rest = 2 * self.policy.object_ring.replica_count - 2
codes = [200, 404] + [200] * rest
ts_iter = iter([1, 2] + [1] * rest)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_GET_primaries_error_during_rebalance(self):
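        # with every node returning 404, a few Timeouts among the primaries
        # still yield a 404; one more Timeout tips the response over to 503,
        # unless a backend reports a tombstone timestamp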
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [404] * (2 * self.policy.object_ring.replica_count)
with mocked_http_conn(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
for i in range(self.policy.object_ring.replica_count - 2):
codes[i] = Timeout()
with mocked_http_conn(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.app.error_limiter.stats.clear() # Reset error limiting
# one more timeout is past the tipping point
codes[self.policy.object_ring.replica_count - 2] = Timeout()
with mocked_http_conn(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
self.app.error_limiter.stats.clear() # Reset error limiting
# unless we have tombstones
with mocked_http_conn(*codes, headers={'X-Backend-Timestamp': '1'}):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def _test_if_match(self, method):
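        # conditional requests are answered with the whole-object EC etag
        # (or the alternate etag named by X-Backend-Etag-Is-At), never the
        # per-fragment etag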
num_responses = self.policy.ec_ndata if method == 'GET' else 1
def _do_test(match_value, backend_status,
etag_is_at='X-Object-Sysmeta-Does-Not-Exist'):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method=method,
headers={'If-Match': match_value,
'X-Backend-Etag-Is-At': etag_is_at})
get_resp = [backend_status] * num_responses
resp_headers = {'Etag': 'frag_etag',
'X-Object-Sysmeta-Ec-Etag': 'data_etag',
'X-Object-Sysmeta-Alternate-Etag': 'alt_etag'}
with set_http_connect(*get_resp, headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual('data_etag', resp.headers['Etag'])
return resp
# wildcard
resp = _do_test('*', 200)
self.assertEqual(resp.status_int, 200)
# match
resp = _do_test('"data_etag"', 200)
self.assertEqual(resp.status_int, 200)
# no match
resp = _do_test('"frag_etag"', 412)
self.assertEqual(resp.status_int, 412)
# match wildcard against an alternate etag
resp = _do_test('*', 200,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 200)
# match against an alternate etag
resp = _do_test('"alt_etag"', 200,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 200)
# no match against an alternate etag
resp = _do_test('"data_etag"', 412,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 412)
def test_GET_if_match(self):
self._test_if_match('GET')
def test_HEAD_if_match(self):
self._test_if_match('HEAD')
def _test_if_none_match(self, method):
num_responses = self.policy.ec_ndata if method == 'GET' else 1
def _do_test(match_value, backend_status,
etag_is_at='X-Object-Sysmeta-Does-Not-Exist'):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method=method,
headers={'If-None-Match': match_value,
'X-Backend-Etag-Is-At': etag_is_at})
get_resp = [backend_status] * num_responses
resp_headers = {'Etag': 'frag_etag',
'X-Object-Sysmeta-Ec-Etag': 'data_etag',
'X-Object-Sysmeta-Alternate-Etag': 'alt_etag'}
with set_http_connect(*get_resp, headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual('data_etag', resp.headers['Etag'])
return resp
# wildcard
resp = _do_test('*', 304)
self.assertEqual(resp.status_int, 304)
# match
resp = _do_test('"data_etag"', 304)
self.assertEqual(resp.status_int, 304)
# no match
resp = _do_test('"frag_etag"', 200)
self.assertEqual(resp.status_int, 200)
# match wildcard against an alternate etag
resp = _do_test('*', 304,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 304)
# match against an alternate etag
resp = _do_test('"alt_etag"', 304,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 304)
# no match against an alternate etag
resp = _do_test('"data_etag"', 200,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 200)
def test_GET_if_none_match(self):
self._test_if_none_match('GET')
def test_HEAD_if_none_match(self):
self._test_if_none_match('HEAD')
def test_GET_simple_x_newest(self):
req = swift.common.swob.Request.blank('/v1/a/c/o',
headers={'X-Newest': 'true'})
codes = [200] * self.policy.ec_ndata
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_GET_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
get_resp = [503] + [200] * self.policy.ec_ndata
with set_http_connect(*get_resp):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_GET_no_response_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_feed_remaining_primaries(self):
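        # when the feeder queue keeps timing out, feed_remaining_primaries
        # should keep polling it with the policy's concurrency_timeout until
        # the remaining primaries are exhausted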
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
safe_iter = utils.GreenthreadSafeIterator(NodeIter(
self.app, self.policy.object_ring, 0, self.logger,
policy=self.policy, request=Request.blank('')))
controller._fragment_GET_request = lambda *a, **k: next(safe_iter)
pile = utils.GreenAsyncPile(self.policy.ec_ndata)
for i in range(self.policy.ec_ndata):
pile.spawn(controller._fragment_GET_request)
req = swob.Request.blank('/v1/a/c/o')
feeder_q = mock.MagicMock()
def feeder_timeout(*a, **kw):
# simulate trampoline
sleep()
# timeout immediately
raise Empty
feeder_q.get.side_effect = feeder_timeout
controller.feed_remaining_primaries(
safe_iter, pile, req, 0, self.policy,
mock.MagicMock(), feeder_q, mock.MagicMock())
expected_timeout = self.app.get_policy_options(
self.policy).concurrency_timeout
expected_call = mock.call(timeout=expected_timeout)
expected_num_calls = self.policy.ec_nparity + 1
self.assertEqual(feeder_q.get.call_args_list,
[expected_call] * expected_num_calls)
def test_GET_timeout(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
self.app.recoverable_node_timeout = 0.01
codes = [FakeStatus(404, response_sleep=1.0)] * 2 + \
[200] * (self.policy.ec_ndata)
with mocked_http_conn(*codes) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(self.policy.ec_ndata + 2, len(log.requests))
self.assertEqual(
len(self.logger.logger.records['ERROR']), 2,
'Expected 2 ERROR lines, got %r' % (
self.logger.logger.records['ERROR'], ))
for retry_line in self.logger.logger.records['ERROR']:
self.assertIn('ERROR with Object server', retry_line)
self.assertIn('Trying to GET', retry_line)
self.assertIn('Timeout (0.01s)', retry_line)
self.assertIn(req.headers['x-trans-id'], retry_line)
def test_GET_with_slow_primaries(self):
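        # with concurrent_gets enabled and the first ec_nparity primaries
        # responding slowly, the proxy fans out to every unique fragment to
        # assemble the object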
segment_size = self.policy.ec_segment_size
test_data = (b'test' * segment_size)[:-743]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
ts = self.ts()
headers = []
for i, body in enumerate(ec_archive_bodies):
headers.append({
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Content-Length': len(body),
'X-Object-Sysmeta-Ec-Frag-Index':
self.policy.get_backend_index(i),
'X-Backend-Timestamp': ts.internal,
'X-Timestamp': ts.normal,
'X-Backend-Durable-Timestamp': ts.internal,
'X-Backend-Data-Timestamp': ts.internal,
})
req = swift.common.swob.Request.blank('/v1/a/c/o')
policy_opts = self.app.get_policy_options(self.policy)
policy_opts.concurrent_gets = True
policy_opts.concurrency_timeout = 0.1
status_codes = ([
FakeStatus(200, response_sleep=2.0),
] * self.policy.ec_nparity) + ([
FakeStatus(200),
] * self.policy.ec_ndata)
self.assertEqual(len(status_codes), len(ec_archive_bodies))
with mocked_http_conn(*status_codes, body_iter=ec_archive_bodies,
headers=headers) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(log.requests),
self.policy.ec_n_unique_fragments)
def test_GET_with_some_slow_primaries(self):
segment_size = self.policy.ec_segment_size
test_data = (b'test' * segment_size)[:-289]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
ts = self.ts()
headers = []
for i, body in enumerate(ec_archive_bodies):
headers.append({
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Content-Length': len(body),
'X-Object-Sysmeta-Ec-Frag-Index':
self.policy.get_backend_index(i),
'X-Backend-Timestamp': ts.internal,
'X-Timestamp': ts.normal,
'X-Backend-Durable-Timestamp': ts.internal,
'X-Backend-Data-Timestamp': ts.internal,
})
req = swift.common.swob.Request.blank('/v1/a/c/o')
policy_opts = self.app.get_policy_options(self.policy)
policy_opts.concurrent_gets = True
policy_opts.concurrency_timeout = 0.1
slow_count = self.policy.ec_nparity
status_codes = ([
FakeStatus(200, response_sleep=2.0),
] * slow_count) + ([
FakeStatus(200),
] * (self.policy.ec_ndata - slow_count))
random.shuffle(status_codes)
status_codes.extend([
FakeStatus(200),
] * slow_count)
self.assertEqual(len(status_codes), len(ec_archive_bodies))
with mocked_http_conn(*status_codes, body_iter=ec_archive_bodies,
headers=headers) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(log.requests),
self.policy.ec_n_unique_fragments)
def test_ec_concurrent_GET_with_slow_leaders(self):
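        # with a zero concurrency_timeout the proxy requests every unique
        # fragment at once; the slowest responses (poisoned bodies here) are
        # abandoned and the object is decoded from the faster fragments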
segment_size = self.policy.ec_segment_size
test_data = (b'test' * segment_size)[:-289]
        etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
ts = self.ts()
headers = []
for i, body in enumerate(ec_archive_bodies):
headers.append({
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Content-Length': len(body),
'X-Object-Sysmeta-Ec-Frag-Index':
self.policy.get_backend_index(i),
'X-Backend-Timestamp': ts.internal,
'X-Timestamp': ts.normal,
'X-Backend-Durable-Timestamp': ts.internal,
'X-Backend-Data-Timestamp': ts.internal,
})
req = swift.common.swob.Request.blank('/v1/a/c/o')
policy_opts = self.app.get_policy_options(self.policy)
policy_opts.concurrent_gets = True
policy_opts.concurrency_timeout = 0.0
slow_count = 4
status_codes = ([
FakeStatus(200, response_sleep=0.2),
] * slow_count) + ([
FakeStatus(200, response_sleep=0.1),
] * (self.policy.ec_n_unique_fragments - slow_count))
for i in range(slow_count):
# poison the super slow requests
ec_archive_bodies[i] = ''
with mocked_http_conn(*status_codes, body_iter=ec_archive_bodies,
headers=headers) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, test_data, '%r != %r' % (
resp.body if len(resp.body) < 60 else '%s...' % resp.body[:60],
test_data if len(test_data) < 60 else '%s...' % test_data[:60],
))
self.assertEqual(len(log.requests), self.policy.ec_n_unique_fragments)
def test_GET_with_slow_nodes_and_failures(self):
segment_size = self.policy.ec_segment_size
test_data = (b'test' * segment_size)[:-289]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
ts = self.ts()
headers = []
for i, body in enumerate(ec_archive_bodies):
headers.append({
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Content-Length': len(body),
'X-Object-Sysmeta-Ec-Frag-Index':
self.policy.get_backend_index(i),
'X-Backend-Timestamp': ts.internal,
'X-Timestamp': ts.normal,
'X-Backend-Durable-Timestamp': ts.internal,
'X-Backend-Data-Timestamp': ts.internal,
})
req = swift.common.swob.Request.blank('/v1/a/c/o')
policy_opts = self.app.get_policy_options(self.policy)
policy_opts.concurrent_gets = True
policy_opts.concurrency_timeout = 0.1
unused_resp = [
FakeStatus(200, response_sleep=2.0),
FakeStatus(200, response_sleep=2.0),
500,
416,
]
self.assertEqual(len(unused_resp), self.policy.ec_nparity)
status_codes = (
[200] * (self.policy.ec_ndata - 4)) + unused_resp
self.assertEqual(len(status_codes), self.policy.ec_ndata)
# random.shuffle(status_codes)
# make up for the failures
status_codes.extend([200] * self.policy.ec_nparity)
self.assertEqual(len(status_codes), len(ec_archive_bodies))
bodies_with_errors = []
for code, body in zip(status_codes, ec_archive_bodies):
if code == 500:
bodies_with_errors.append('Kaboom')
elif code == 416:
bodies_with_errors.append('That Range is no.')
else:
bodies_with_errors.append(body)
with mocked_http_conn(*status_codes, body_iter=bodies_with_errors,
headers=headers) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(log.requests),
self.policy.ec_n_unique_fragments)
def test_GET_with_one_slow_frag_lane(self):
segment_size = self.policy.ec_segment_size
test_data = (b'test' * segment_size)[:-454]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
ts = self.ts()
headers = []
for i, body in enumerate(ec_archive_bodies):
headers.append({
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Content-Length': len(body),
'X-Object-Sysmeta-Ec-Frag-Index':
self.policy.get_backend_index(i),
'X-Backend-Timestamp': ts.internal,
'X-Timestamp': ts.normal,
'X-Backend-Durable-Timestamp': ts.internal,
'X-Backend-Data-Timestamp': ts.internal,
})
req = swift.common.swob.Request.blank('/v1/a/c/o')
policy_opts = self.app.get_policy_options(self.policy)
policy_opts.concurrent_gets = True
policy_opts.concurrency_timeout = 0.1
status_codes = [
FakeStatus(200, response_sleep=2.0),
] + ([
FakeStatus(200),
] * (self.policy.ec_ndata - 1))
random.shuffle(status_codes)
status_codes.extend([
FakeStatus(200, response_sleep=2.0),
FakeStatus(200, response_sleep=2.0),
FakeStatus(200, response_sleep=2.0),
FakeStatus(200),
])
self.assertEqual(len(status_codes), len(ec_archive_bodies))
with mocked_http_conn(*status_codes, body_iter=ec_archive_bodies,
headers=headers) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(log.requests),
self.policy.ec_n_unique_fragments)
def test_GET_with_concurrent_ec_extra_requests(self):
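        # concurrent_ec_extra_requests only adds backend requests when
        # concurrent_gets is enabled: without it the proxy makes exactly
        # ec_ndata requests, with it ec_ndata plus the extra requests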
segment_size = self.policy.ec_segment_size
test_data = (b'test' * segment_size)[:-454]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
ts = self.ts()
headers = []
for i, body in enumerate(ec_archive_bodies):
headers.append({
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Content-Length': len(body),
'X-Object-Sysmeta-Ec-Frag-Index':
self.policy.get_backend_index(i),
'X-Backend-Timestamp': ts.internal,
'X-Timestamp': ts.normal,
'X-Backend-Durable-Timestamp': ts.internal,
'X-Backend-Data-Timestamp': ts.internal,
})
policy_opts = self.app.get_policy_options(self.policy)
policy_opts.concurrent_ec_extra_requests = self.policy.ec_nparity - 1
req = swift.common.swob.Request.blank('/v1/a/c/o')
# w/o concurrent_gets ec_extra_requests has no effect
status_codes = [200] * self.policy.ec_ndata
with mocked_http_conn(*status_codes, body_iter=ec_archive_bodies,
headers=headers) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(log.requests), self.policy.ec_ndata)
self.assertEqual(resp.body, test_data)
policy_opts.concurrent_gets = True
status_codes = [200] * (self.policy.object_ring.replicas - 1)
with mocked_http_conn(*status_codes, body_iter=ec_archive_bodies,
headers=headers) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(log.requests),
self.policy.object_ring.replicas - 1)
self.assertEqual(resp.body, test_data)
def test_GET_with_body(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
# turn a real body into fragments
segment_size = self.policy.ec_segment_size
real_body = (b'asdf' * segment_size)[:-10]
# split it up into chunks
chunks = [real_body[x:x + segment_size]
for x in range(0, len(real_body), segment_size)]
fragment_payloads = []
for chunk in chunks:
fragments = self.policy.pyeclib_driver.encode(chunk)
if not fragments:
break
fragment_payloads.append(
fragments * self.policy.ec_duplication_factor)
# sanity
sanity_body = b''
for fragment_payload in fragment_payloads:
sanity_body += self.policy.pyeclib_driver.decode(
fragment_payload)
self.assertEqual(len(real_body), len(sanity_body))
self.assertEqual(real_body, sanity_body)
# list(zip(...)) for py3 compatibility (zip is lazy there)
node_fragments = list(zip(*fragment_payloads))
self.assertEqual(len(node_fragments), self.replicas()) # sanity
headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))}
responses = [(200, b''.join(node_fragments[i]), headers)
for i in range(POLICIES.default.ec_ndata)]
status_codes, body_iter, headers = zip(*responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(real_body), len(resp.body))
self.assertEqual(real_body, resp.body)
def test_GET_with_frags_swapped_around(self):
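        # even if a data node holds a parity fragment and a parity node
        # holds the corresponding data fragment, the proxy can still rebuild
        # the object from the first ec_ndata responses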
segment_size = self.policy.ec_segment_size
test_data = (b'test' * segment_size)[:-657]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
_part, primary_nodes = self.obj_ring.get_nodes('a', 'c', 'o')
node_key = lambda n: (n['ip'], n['port'])
backend_index = self.policy.get_backend_index
ts = self.ts()
response_map = {
node_key(n): StubResponse(
200, ec_archive_bodies[backend_index(i)], {
'X-Object-Sysmeta-Ec-Content-Length': len(test_data),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Frag-Index': backend_index(i),
'X-Timestamp': ts.normal,
'X-Backend-Timestamp': ts.internal
}) for i, n in enumerate(primary_nodes)
}
# swap a parity response into a data node
data_node = random.choice(primary_nodes[:self.policy.ec_ndata])
parity_node = random.choice(
primary_nodes[
self.policy.ec_ndata:self.policy.ec_n_unique_fragments])
(response_map[node_key(data_node)],
response_map[node_key(parity_node)]) = \
(response_map[node_key(parity_node)],
response_map[node_key(data_node)])
def get_response(req):
req_key = (req['ip'], req['port'])
return response_map.pop(req_key)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(log), self.policy.ec_ndata)
self.assertEqual(len(response_map),
len(primary_nodes) - self.policy.ec_ndata)
def test_GET_with_no_success(self):
node_frags = [[]] * 28 # no frags on any node
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.assertEqual(len(log), 2 * self.replicas())
def test_GET_with_only_handoffs(self):
obj1 = self._make_ec_object_stub()
node_frags = [[]] * self.replicas() # all primaries missing
node_frags = node_frags + [ # handoffs
{'obj': obj1, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj1, 'frag': 7},
{'obj': obj1, 'frag': 8},
{'obj': obj1, 'frag': 9},
{'obj': obj1, 'frag': 10}, # parity
{'obj': obj1, 'frag': 11}, # parity
{'obj': obj1, 'frag': 12}, # parity
{'obj': obj1, 'frag': 13}, # parity
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(
resp.body, usedforsecurity=False).hexdigest(), obj1['etag'])
collected_responses = defaultdict(list)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].append(index)
        # GETs would be required to all primaries and then ec_ndata handoffs
self.assertEqual(len(log), self.replicas() + self.policy.ec_ndata)
self.assertEqual(2, len(collected_responses))
# 404s
self.assertEqual(self.replicas(), len(collected_responses[None]))
self.assertEqual(self.policy.ec_ndata,
len(collected_responses[obj1['etag']]))
def test_GET_with_single_missed_overwrite_does_not_need_handoff(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
node_frags = [
{'obj': obj2, 'frag': 0},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2}, # missed over write
{'obj': obj2, 'frag': 3},
{'obj': obj2, 'frag': 4},
{'obj': obj2, 'frag': 5},
{'obj': obj2, 'frag': 6},
{'obj': obj2, 'frag': 7},
{'obj': obj2, 'frag': 8},
{'obj': obj2, 'frag': 9},
{'obj': obj2, 'frag': 10}, # parity
{'obj': obj2, 'frag': 11}, # parity
{'obj': obj2, 'frag': 12}, # parity
{'obj': obj2, 'frag': 13}, # parity
# {'obj': obj2, 'frag': 2}, # handoff (not used in this test)
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with mock.patch('swift.proxy.server.shuffle', lambda n: n), \
capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
closed_conn = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
closed_conn[etag].add(conn.closed)
self.assertEqual({
obj1['etag']: {True},
obj2['etag']: {False},
}, closed_conn)
self.assertEqual(md5(
resp.body, usedforsecurity=False).hexdigest(), obj2['etag'])
self.assertEqual({True}, {conn.closed for conn in log})
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
self.assertEqual(len(log), self.policy.ec_ndata + 1)
expected = {
obj1['etag']: 1,
obj2['etag']: self.policy.ec_ndata,
}
self.assertEqual(expected, {
e: len(f) for e, f in collected_responses.items()})
def test_GET_with_many_missed_overwrite_will_need_handoff(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
node_frags = [
{'obj': obj2, 'frag': 0},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2}, # missed
{'obj': obj2, 'frag': 3},
{'obj': obj2, 'frag': 4},
{'obj': obj2, 'frag': 5},
{'obj': obj1, 'frag': 6}, # missed
{'obj': obj2, 'frag': 7},
{'obj': obj2, 'frag': 8},
{'obj': obj1, 'frag': 9}, # missed
{'obj': obj1, 'frag': 10}, # missed
{'obj': obj1, 'frag': 11}, # missed
{'obj': obj2, 'frag': 12},
{'obj': obj2, 'frag': 13},
{'obj': obj2, 'frag': 6}, # handoff
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(
resp.body, usedforsecurity=False).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
        # there's not enough of the obj2 etag on the primaries, so we will
        # have collected responses for both etags and made one more request
        # to the handoff node
self.assertEqual(len(log), self.replicas() + 1)
self.assertEqual(len(collected_responses), 2)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_succeed(self):
obj1 = self._make_ec_object_stub(pattern='obj1', timestamp=self.ts())
obj2 = self._make_ec_object_stub(pattern='obj2', timestamp=self.ts())
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
[],
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
[],
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
[],
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
[],
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
[],
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
[],
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
[],
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
[],
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
[],
{'obj': obj2, 'frag': 9},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(
resp.body, usedforsecurity=False).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# we go exactly as long as we have to, finding two different
# etags and some 404's (i.e. collected_responses[None])
self.assertEqual(len(log), len(node_frags))
self.assertEqual(len(collected_responses), 3)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_stop(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
[],
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
[],
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
[],
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
[],
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
[],
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
[],
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
[],
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
[],
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
[],
# handoffs are iter'd in order so proxy will see 404 from this
# final handoff
[],
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
closed_conn = defaultdict(set)
for conn in log:
etag = conn.resp.headers.get('X-Object-Sysmeta-Ec-Etag')
closed_conn[etag].add(conn.closed)
self.assertEqual({
obj1['etag']: {True},
obj2['etag']: {True},
None: {True},
}, dict(closed_conn))
self.assertEqual(resp.status_int, 503)
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# default node_iter will exhaust at 2 * replicas
self.assertEqual(len(log), 2 * self.replicas())
self.assertEqual(len(collected_responses), 3)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_duplicate_and_hidden_frag_indexes(self):
obj1 = self._make_ec_object_stub()
        # the proxy should ignore duplicated frag indexes and keep searching
        # for a set of unique indexes, finding the last one on a handoff
node_frags = [
[{'obj': obj1, 'frag': 0}, {'obj': obj1, 'frag': 5}],
{'obj': obj1, 'frag': 0}, # duplicate frag
{'obj': obj1, 'frag': 1},
{'obj': obj1, 'frag': 1}, # duplicate frag
{'obj': obj1, 'frag': 2},
{'obj': obj1, 'frag': 2}, # duplicate frag
{'obj': obj1, 'frag': 3},
{'obj': obj1, 'frag': 3}, # duplicate frag
{'obj': obj1, 'frag': 4},
{'obj': obj1, 'frag': 4}, # duplicate frag
{'obj': obj1, 'frag': 10},
{'obj': obj1, 'frag': 11},
{'obj': obj1, 'frag': 12},
{'obj': obj1, 'frag': 13},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(
resp.body, usedforsecurity=False).hexdigest(), obj1['etag'])
# Expect a maximum of one request to each primary plus one extra
# request to node 1. Actual value could be less if the extra request
# occurs and quorum is reached before requests to nodes with a
# duplicate frag.
self.assertLessEqual(len(log), self.replicas() + 1)
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), self.policy.ec_ndata)
def test_GET_with_missing_and_mixed_frags_may_503(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
# we get a 503 when all the handoffs return 200
node_frags = [[]] * self.replicas() # primaries have no frags
node_frags = node_frags + [ # handoffs all have frags
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
# never get a quorum so all nodes are searched
self.assertEqual(len(log), 2 * self.replicas())
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), 7)
def test_GET_with_mixed_nondurable_frags_and_will_404(self):
# all nodes have a frag but there is no one set that reaches quorum,
# which means there is no backend 404 response, but proxy should still
# return 404 rather than 503
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
obj3 = self._make_ec_object_stub(pattern='obj3')
obj4 = self._make_ec_object_stub(pattern='obj4')
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj2, 'frag': 0, 'durable': False},
{'obj': obj3, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj2, 'frag': 1, 'durable': False},
{'obj': obj3, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj2, 'frag': 2, 'durable': False},
{'obj': obj3, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj2, 'frag': 3, 'durable': False},
{'obj': obj3, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj2, 'frag': 4, 'durable': False},
{'obj': obj3, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj2, 'frag': 5, 'durable': False},
{'obj': obj3, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj2, 'frag': 6, 'durable': False},
{'obj': obj3, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj2, 'frag': 7, 'durable': False},
{'obj': obj3, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj2, 'frag': 8, 'durable': False},
{'obj': obj3, 'frag': 8, 'durable': False},
{'obj': obj4, 'frag': 8, 'durable': False},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
collected_etags = set()
collected_status = set()
closed_conn = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
collected_etags.add(etag)
collected_status.add(conn.resp.status)
closed_conn[etag].add(conn.closed)
# default node_iter will exhaust at 2 * replicas
self.assertEqual(len(log), 2 * self.replicas())
self.assertEqual(
{obj1['etag'], obj2['etag'], obj3['etag'], obj4['etag']},
collected_etags)
self.assertEqual({200}, collected_status)
self.assertEqual({
obj1['etag']: {True},
obj2['etag']: {True},
obj3['etag']: {True},
obj4['etag']: {True},
}, closed_conn)
def test_GET_with_mixed_durable_and_nondurable_frags_will_503(self):
# all nodes have a frag but there is no one set that reaches quorum,
# but since one is marked durable we *should* be able to reconstruct,
# so proxy should 503
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
obj3 = self._make_ec_object_stub(pattern='obj3')
obj4 = self._make_ec_object_stub(pattern='obj4')
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj2, 'frag': 0, 'durable': False},
{'obj': obj3, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj2, 'frag': 1, 'durable': False},
{'obj': obj3, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj2, 'frag': 2, 'durable': False},
{'obj': obj3, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj2, 'frag': 3, 'durable': False},
{'obj': obj3, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj2, 'frag': 4, 'durable': False},
{'obj': obj3, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj2, 'frag': 5, 'durable': False},
{'obj': obj3, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj2, 'frag': 6, 'durable': False},
{'obj': obj3, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj2, 'frag': 7, 'durable': False},
{'obj': obj3, 'frag': 7},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj2, 'frag': 8, 'durable': False},
{'obj': obj3, 'frag': 8, 'durable': False},
{'obj': obj4, 'frag': 8, 'durable': False},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
closed_conn = defaultdict(set)
collected_etags = set()
collected_status = set()
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
collected_etags.add(etag)
collected_status.add(conn.resp.status)
closed_conn[etag].add(conn.closed)
# default node_iter will exhaust at 2 * replicas
self.assertEqual(len(log), 2 * self.replicas())
self.assertEqual(
{obj1['etag'], obj2['etag'], obj3['etag'], obj4['etag']},
collected_etags)
self.assertEqual({200}, collected_status)
self.assertEqual({
obj1['etag']: {True},
obj2['etag']: {True},
obj3['etag']: {True},
obj4['etag']: {True},
}, closed_conn)
def test_GET_with_mixed_durable_frags_and_no_quorum_will_503(self):
# all nodes have a frag but there is no one set that reaches quorum,
# and since at least one is marked durable we *should* be able to
# reconstruct, so proxy will 503
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
obj3 = self._make_ec_object_stub(pattern='obj3')
obj4 = self._make_ec_object_stub(pattern='obj4')
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{'obj': obj3, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{'obj': obj3, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{'obj': obj3, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{'obj': obj3, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{'obj': obj3, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{'obj': obj3, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
{'obj': obj3, 'frag': 6},
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
{'obj': obj3, 'frag': 7},
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
{'obj': obj3, 'frag': 8},
{'obj': obj4, 'frag': 8},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
collected_etags = set()
collected_status = set()
closed_conn = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
collected_etags.add(etag)
collected_status.add(conn.resp.status)
closed_conn[etag].add(conn.closed)
# default node_iter will exhaust at 2 * replicas
self.assertEqual(len(log), 2 * self.replicas())
self.assertEqual(
{obj1['etag'], obj2['etag'], obj3['etag'], obj4['etag']},
collected_etags)
self.assertEqual({200}, collected_status)
self.assertEqual({
obj1['etag']: {True},
obj2['etag']: {True},
obj3['etag']: {True},
obj4['etag']: {True},
}, closed_conn)
def test_GET_with_quorum_durable_files(self):
# verify that only (ec_nparity + 1) nodes need to be durable for a GET
# to be completed with ec_ndata requests.
obj1 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': True}, # durable
{'obj': obj1, 'frag': 1, 'durable': True}, # durable
{'obj': obj1, 'frag': 2, 'durable': True}, # durable
{'obj': obj1, 'frag': 3, 'durable': True}, # durable
{'obj': obj1, 'frag': 4, 'durable': True}, # durable
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': False}, # parity
] + [[]] * self.replicas() # handoffs all 404
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(
resp.body, usedforsecurity=False).hexdigest(), obj1['etag'])
self.assertGreaterEqual(len(log), self.policy.ec_ndata)
collected_durables = []
for conn in log:
if not conn.resp.headers.get('X-Backend-Data-Timestamp'):
continue
if (conn.resp.headers.get('X-Backend-Durable-Timestamp')
== conn.resp.headers.get('X-Backend-Data-Timestamp')):
collected_durables.append(conn)
# because nodes are shuffled we can't be sure how many durables are
# returned but it must be at least 1 and cannot exceed 5
self.assertLessEqual(len(collected_durables), 5)
self.assertGreaterEqual(len(collected_durables), 1)
def test_GET_with_single_durable_file(self):
# verify that a single durable is sufficient for a GET
# to be completed with ec_ndata requests.
obj1 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': True}, # durable
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': False}, # parity
] + [[]] * self.replicas() # handoffs all 404
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(
resp.body, usedforsecurity=False).hexdigest(), obj1['etag'])
collected_durables = []
for conn in log:
if not conn.resp.headers.get('X-Backend-Data-Timestamp'):
continue
if (conn.resp.headers.get('X-Backend-Durable-Timestamp')
== conn.resp.headers.get('X-Backend-Data-Timestamp')):
collected_durables.append(conn)
# because nodes are shuffled we can't be sure how many non-durables
# are returned before the durable, but we do expect a single durable
self.assertEqual(1, len(collected_durables))
def test_GET_with_no_durable_files(self):
# verify that at least one durable is necessary for a successful GET
obj1 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': False}, # parity
] + [[]] * self.replicas() # handoffs
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
# all 28 nodes tried with an optimistic get, none are durable and none
# report having a durable timestamp
self.assertEqual(28, len(log))
def test_GET_with_missing_durable_files_and_mixed_etags(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
        # non-quorate durables for another object won't stop us from finding
        # the quorate object
node_frags = [
# ec_ndata - 1 frags of obj2 are available and durable
{'obj': obj2, 'frag': 0, 'durable': True},
{'obj': obj2, 'frag': 1, 'durable': True},
{'obj': obj2, 'frag': 2, 'durable': True},
{'obj': obj2, 'frag': 3, 'durable': True},
{'obj': obj2, 'frag': 4, 'durable': True},
{'obj': obj2, 'frag': 5, 'durable': True},
{'obj': obj2, 'frag': 6, 'durable': True},
{'obj': obj2, 'frag': 7, 'durable': True},
{'obj': obj2, 'frag': 8, 'durable': True},
# ec_ndata frags of obj1 are available and one is durable
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': True},
]
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
closed_conn = defaultdict(set)
for conn in log:
etag = conn.resp.headers.get('X-Object-Sysmeta-Ec-Etag')
closed_conn[etag].add(conn.closed)
self.assertEqual({
obj1['etag']: {False},
obj2['etag']: {True},
}, closed_conn)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(
resp.body, usedforsecurity=False).hexdigest(), obj1['etag'])
        # A quorum of non-durables for a different object won't prevent us
        # from hunting down the durable object
node_frags = [
# primaries
{'obj': obj2, 'frag': 0, 'durable': False},
{'obj': obj2, 'frag': 1, 'durable': False},
{'obj': obj2, 'frag': 2, 'durable': False},
{'obj': obj2, 'frag': 3, 'durable': False},
{'obj': obj2, 'frag': 4, 'durable': False},
{'obj': obj2, 'frag': 5, 'durable': False},
{'obj': obj2, 'frag': 6, 'durable': False},
{'obj': obj2, 'frag': 7, 'durable': False},
{'obj': obj2, 'frag': 8, 'durable': False},
{'obj': obj2, 'frag': 9, 'durable': False},
{'obj': obj2, 'frag': 10, 'durable': False},
{'obj': obj2, 'frag': 11, 'durable': False},
{'obj': obj2, 'frag': 12, 'durable': False},
{'obj': obj2, 'frag': 13, 'durable': False},
# handoffs
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': True}, # parity
]
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(
resp.body, usedforsecurity=False).hexdigest(), obj1['etag'])
def test_GET_with_missing_durables_and_older_durables(self):
# scenario: non-durable frags of newer obj1 obscure all durable frags
# of older obj2, so first 14 requests result in a non-durable set.
# At that point (or before) the proxy knows that a durable set of
        # frags for obj2 exists, so it will fetch them, requiring another
        # 10 directed requests.
obj2 = self._make_ec_object_stub(pattern='obj2', timestamp=self.ts())
obj1 = self._make_ec_object_stub(pattern='obj1', timestamp=self.ts())
node_frags = [
[{'obj': obj1, 'frag': 0, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj2, 'frag': 1, 'durable': True}],
[{'obj': obj1, 'frag': 2, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj2, 'frag': 3, 'durable': True}],
[{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj2, 'frag': 4, 'durable': True}],
[{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj2, 'frag': 5, 'durable': True}],
[{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj2, 'frag': 6, 'durable': True}],
[{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj2, 'frag': 7, 'durable': True}],
[{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj2, 'frag': 8, 'durable': True}],
[{'obj': obj1, 'frag': 9, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 10, 'durable': False},
{'obj': obj2, 'frag': 10, 'durable': True}],
[{'obj': obj1, 'frag': 11, 'durable': False},
{'obj': obj2, 'frag': 11, 'durable': True}],
[{'obj': obj1, 'frag': 12, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 13, 'durable': False},
{'obj': obj2, 'frag': 13, 'durable': True}],
]
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
closed_conn = defaultdict(set)
for conn in log:
etag = conn.resp.headers.get('X-Object-Sysmeta-Ec-Etag')
closed_conn[etag].add(conn.closed)
self.assertEqual({
obj1['etag']: {True},
obj2['etag']: {False},
}, closed_conn)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(
resp.body, usedforsecurity=False).hexdigest(), obj2['etag'])
        # max: proxy will GET all non-durable obj1 frags and then 10 obj2 frags
self.assertLessEqual(len(log), self.replicas() + self.policy.ec_ndata)
        # min: proxy will GET 10 non-durable obj1 frags and then 10 obj2 frags
self.assertGreaterEqual(len(log), 2 * self.policy.ec_ndata)
def test_GET_with_missing_durables_and_older_obscured_durables(self):
# scenario: obj3 has 14 frags but only 2 are durable and these are
# obscured by two non-durable frags of obj1. There is also a single
# non-durable frag of obj2. The proxy will need to do at least 10
# GETs to see all the obj3 frags plus 1 more to GET a durable frag.
# The proxy may also do one more GET if the obj2 frag is found.
# i.e. 10 + 1 durable for obj3, 2 for obj1 and 1 more if obj2 found
obj2 = self._make_ec_object_stub(pattern='obj2', timestamp=self.ts())
obj3 = self._make_ec_object_stub(pattern='obj3', timestamp=self.ts())
obj1 = self._make_ec_object_stub(pattern='obj1', timestamp=self.ts())
node_frags = [
[{'obj': obj1, 'frag': 0, 'durable': False}, # obj1 frag
{'obj': obj3, 'frag': 0, 'durable': True}],
[{'obj': obj1, 'frag': 1, 'durable': False}, # obj1 frag
{'obj': obj3, 'frag': 1, 'durable': True}],
[{'obj': obj2, 'frag': 2, 'durable': False}, # obj2 frag
{'obj': obj3, 'frag': 2, 'durable': False}],
[{'obj': obj3, 'frag': 3, 'durable': False}],
[{'obj': obj3, 'frag': 4, 'durable': False}],
[{'obj': obj3, 'frag': 5, 'durable': False}],
[{'obj': obj3, 'frag': 6, 'durable': False}],
[{'obj': obj3, 'frag': 7, 'durable': False}],
[{'obj': obj3, 'frag': 8, 'durable': False}],
[{'obj': obj3, 'frag': 9, 'durable': False}],
[{'obj': obj3, 'frag': 10, 'durable': False}],
[{'obj': obj3, 'frag': 11, 'durable': False}],
[{'obj': obj3, 'frag': 12, 'durable': False}],
[{'obj': obj3, 'frag': 13, 'durable': False}],
] + [[]] * self.replicas() # handoffs 404
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj3['etag'])
self.assertEqual(md5(
resp.body, usedforsecurity=False).hexdigest(), obj3['etag'])
self.assertGreaterEqual(len(log), self.policy.ec_ndata + 1)
self.assertLessEqual(len(log), (self.policy.ec_ndata * 2) + 1)
def test_GET_with_missing_durables_and_older_non_durables(self):
# scenario: non-durable frags of newer obj1 obscure all frags
# of older obj2, so first 28 requests result in a non-durable set.
# There are only 10 frags for obj2 and one is not durable.
obj2 = self._make_ec_object_stub(pattern='obj2', timestamp=self.ts())
obj1 = self._make_ec_object_stub(pattern='obj1', timestamp=self.ts())
node_frags = [
[{'obj': obj1, 'frag': 0, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj2, 'frag': 1, 'durable': False}], # obj2 non-durable
[{'obj': obj1, 'frag': 2, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj2, 'frag': 3, 'durable': True}],
[{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj2, 'frag': 4, 'durable': True}],
[{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj2, 'frag': 5, 'durable': True}],
[{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj2, 'frag': 6, 'durable': True}],
[{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj2, 'frag': 7, 'durable': True}],
[{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj2, 'frag': 8, 'durable': True}],
[{'obj': obj1, 'frag': 9, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 10, 'durable': False},
{'obj': obj2, 'frag': 10, 'durable': True}],
[{'obj': obj1, 'frag': 11, 'durable': False},
{'obj': obj2, 'frag': 11, 'durable': True}],
[{'obj': obj1, 'frag': 12, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 13, 'durable': False},
{'obj': obj2, 'frag': 13, 'durable': True}],
[], # 1 empty primary
]
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(
resp.body, usedforsecurity=False).hexdigest(), obj2['etag'])
# max: proxy will GET all non-durable obj1 frags and then 10 obj2 frags
self.assertLessEqual(len(log), self.replicas() + self.policy.ec_ndata)
# min: proxy will GET 10 non-durable obj1 frags and then 10 obj2 frags
self.assertGreaterEqual(len(log), 2 * self.policy.ec_ndata)
def test_GET_with_mixed_etags_at_same_timestamp(self):
# this scenario should never occur but if there are somehow
# fragments for different content at the same timestamp then the
# object controller should handle it gracefully
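        # (each etag only has 7 frags available here, fewer than the
        # ec_ndata (10) needed to decode, so neither object can be served
        # and the proxy ends up returning a 503, as asserted below)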
ts = self.ts() # force equal timestamps for two objects
obj1 = self._make_ec_object_stub(timestamp=ts, pattern='obj1')
obj2 = self._make_ec_object_stub(timestamp=ts, pattern='obj2')
self.assertNotEqual(obj1['etag'], obj2['etag']) # sanity
node_frags = [
# 7 frags of obj2 are available and durable
{'obj': obj2, 'frag': 0, 'durable': True},
{'obj': obj2, 'frag': 1, 'durable': True},
{'obj': obj2, 'frag': 2, 'durable': True},
{'obj': obj2, 'frag': 3, 'durable': True},
{'obj': obj2, 'frag': 4, 'durable': True},
{'obj': obj2, 'frag': 5, 'durable': True},
{'obj': obj2, 'frag': 6, 'durable': True},
# 7 frags of obj1 are available and durable
{'obj': obj1, 'frag': 7, 'durable': True},
{'obj': obj1, 'frag': 8, 'durable': True},
{'obj': obj1, 'frag': 9, 'durable': True},
{'obj': obj1, 'frag': 10, 'durable': True},
{'obj': obj1, 'frag': 11, 'durable': True},
{'obj': obj1, 'frag': 12, 'durable': True},
{'obj': obj1, 'frag': 13, 'durable': True},
] + [[]] * self.replicas() # handoffs
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
# read body to provoke any EC decode errors
self.assertTrue(resp.body)
self.assertEqual(resp.status_int, 503)
self.assertEqual(len(log), self.replicas() * 2)
collected_etags = set()
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
collected_etags.add(etag) # will be None from handoffs
self.assertEqual({obj1['etag'], obj2['etag'], None}, collected_etags)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertEqual(log_lines,
['Problem with fragment response: ETag mismatch'] * 7
+ ['Object returning 503 for []'])
# Note the empty list above -- that log line comes out of
# best_response but we've already thrown out the "good" responses :-/
def test_GET_mixed_success_with_range(self):
fragment_size = self.policy.fragment_size
ec_stub = self._make_ec_object_stub()
frag_archives = ec_stub['frags']
frag_archive_size = len(ec_stub['frags'][0])
headers = {
'Content-Type': 'text/plain',
'Content-Length': fragment_size,
'Content-Range': 'bytes 0-%s/%s' % (fragment_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(ec_stub['body']),
'X-Object-Sysmeta-Ec-Etag': ec_stub['etag'],
'X-Timestamp': Timestamp(self.ts()).normal,
}
responses = [
StubResponse(206, frag_archives[0][:fragment_size], headers, 0),
StubResponse(206, frag_archives[1][:fragment_size], headers, 1),
StubResponse(206, frag_archives[2][:fragment_size], headers, 2),
StubResponse(206, frag_archives[3][:fragment_size], headers, 3),
StubResponse(206, frag_archives[4][:fragment_size], headers, 4),
# data nodes with old frag
StubResponse(416, frag_index=5),
StubResponse(416, frag_index=6),
StubResponse(206, frag_archives[7][:fragment_size], headers, 7),
StubResponse(206, frag_archives[8][:fragment_size], headers, 8),
StubResponse(206, frag_archives[9][:fragment_size], headers, 9),
# hopefully we ask for two more
StubResponse(206, frag_archives[10][:fragment_size], headers, 10),
StubResponse(206, frag_archives[11][:fragment_size], headers, 11),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={'Range': 'bytes=0-3'})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 206)
self.assertEqual(resp.body, b'test')
self.assertEqual(len(log), self.policy.ec_ndata + 2)
# verify that even when last responses to be collected are 416's
# the shortfall of 2xx responses still triggers extra spawned requests
responses = [
StubResponse(206, frag_archives[0][:fragment_size], headers, 0),
StubResponse(206, frag_archives[1][:fragment_size], headers, 1),
StubResponse(206, frag_archives[2][:fragment_size], headers, 2),
StubResponse(206, frag_archives[3][:fragment_size], headers, 3),
StubResponse(206, frag_archives[4][:fragment_size], headers, 4),
StubResponse(206, frag_archives[7][:fragment_size], headers, 7),
StubResponse(206, frag_archives[8][:fragment_size], headers, 8),
StubResponse(206, frag_archives[9][:fragment_size], headers, 9),
StubResponse(206, frag_archives[10][:fragment_size], headers, 10),
# data nodes with old frag
StubResponse(416, frag_index=5),
# hopefully we ask for one more
StubResponse(416, frag_index=6),
# and hopefully we ask for another
StubResponse(206, frag_archives[11][:fragment_size], headers, 11),
]
req = swob.Request.blank('/v1/a/c/o', headers={'Range': 'bytes=0-3'})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 206)
self.assertEqual(resp.body, b'test')
self.assertEqual(len(log), self.policy.ec_ndata + 2)
def test_GET_with_range_unsatisfiable_mixed_success(self):
responses = [
StubResponse(416, frag_index=0),
StubResponse(416, frag_index=1),
StubResponse(416, frag_index=2),
StubResponse(416, frag_index=3),
StubResponse(416, frag_index=4),
StubResponse(416, frag_index=5),
StubResponse(416, frag_index=6),
# sneak a couple bogus extra responses
StubResponse(404),
StubResponse(206, frag_index=8),
# and then just "enough" more 416's
StubResponse(416, frag_index=9),
StubResponse(416, frag_index=10),
StubResponse(416, frag_index=11),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=%s-' % 100000000000000})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 416)
# we're going to engage ndata primaries, plus the bogus extra
# self.assertEqual(len(log), self.policy.ec_ndata + 2)
self.assertEqual([c.resp.status for c in log],
([416] * 7) + [404, 206] + ([416] * 3))
def test_GET_with_missing_and_range_unsatisifiable(self):
responses = [ # not quite ec_ndata frags on primaries
StubResponse(416, frag_index=0),
StubResponse(416, frag_index=1),
StubResponse(416, frag_index=2),
StubResponse(416, frag_index=3),
StubResponse(416, frag_index=4),
StubResponse(416, frag_index=5),
StubResponse(416, frag_index=6),
StubResponse(416, frag_index=7),
StubResponse(416, frag_index=8),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=%s-' % 100000000000000})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
# TODO: does 416 make sense without a quorum, or should this be a 404?
# a non-range GET of same object would return 404
self.assertEqual(resp.status_int, 416)
self.assertEqual(len(log), 2 * self.replicas())
@patch_policies(
[ECStoragePolicy(0, name='ec', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=4,
ec_nparity=4, ec_segment_size=4096)],
fake_ring_args=[{'replicas': 8}]
)
def test_GET_ndata_equals_nparity_with_missing_and_errors(self):
# when ec_ndata == ec_nparity it is possible for the shortfall of a bad
# bucket (412's) to equal ec_ndata; verify that the 412 bucket is still
# chosen ahead of the initial 'dummy' bad bucket
POLICIES.default.object_ring.max_more_nodes = 8
responses = [
StubResponse(412, frag_index=0),
StubResponse(412, frag_index=1),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=%s-' % 100000000000000})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 412)
self.assertEqual(len(log), 2 * 8)
def test_GET_with_multirange(self):
self.app.object_chunk_size = 256
test_body = b'test' * self.policy.ec_segment_size
ec_stub = make_ec_object_stub(test_body, self.policy, None)
frag_archives = ec_stub['frags']
self.assertEqual(len(frag_archives[0]), 1960)
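        # (1960 == 4 segments x 490 bytes per encoded fragment; the test
        # body is 16384 bytes, i.e. four 4096-byte ec segments)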
boundary = b'81eb9c110b32ced5fe'
def make_mime_body(frag_archive):
return b'\r\n'.join([
b'--' + boundary,
b'Content-Type: application/octet-stream',
b'Content-Range: bytes 0-489/1960',
b'',
frag_archive[0:490],
b'--' + boundary,
b'Content-Type: application/octet-stream',
b'Content-Range: bytes 1470-1959/1960',
b'',
frag_archive[1470:],
b'--' + boundary + b'--',
])
obj_resp_bodies = [make_mime_body(fa) for fa
in ec_stub['frags'][:self.policy.ec_ndata]]
headers = {
'Content-Type': b'multipart/byteranges;boundary=' + boundary,
'Content-Length': len(obj_resp_bodies[0]),
'X-Object-Sysmeta-Ec-Content-Length': len(ec_stub['body']),
'X-Object-Sysmeta-Ec-Etag': ec_stub['etag'],
'X-Timestamp': Timestamp(self.ts()).normal,
}
responses = [
StubResponse(206, body, headers, i)
for i, body in enumerate(obj_resp_bodies)
]
def get_response(req):
# there's some math going on here I don't quite understand, the
# fragment_size is 490 and there's like 4 of them because ec_body
# is 'test' * segment_size
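            # (concretely: the 16384-byte body is 4 segments of 4096 bytes,
            # each segment encodes to a 490-byte fragment, and the client
            # ranges 1000-2000 and 14000-15000 fall in segments 0 and 3,
            # i.e. fragment byte ranges 0-489 and 1470-1959)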
self.assertEqual(req['headers']['Range'], 'bytes=0-489,1470-1959')
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=1000-2000,14000-15000'})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 206)
self.assertEqual(len(log), self.policy.ec_ndata)
resp_boundary = resp.headers['content-type'].rsplit('=', 1)[1].encode()
expected = b'\r\n'.join([
b'--' + resp_boundary,
b'Content-Type: application/octet-stream',
b'Content-Range: bytes 1000-2000/16384',
b'',
ec_stub['body'][1000:2001],
b'--' + resp_boundary,
b'Content-Type: application/octet-stream',
b'Content-Range: bytes 14000-15000/16384',
b'',
ec_stub['body'][14000:15001],
b'--' + resp_boundary + b'--',
])
self.assertEqual(resp.body, expected)
def test_GET_with_multirange_slow_body(self):
self.app.object_chunk_size = 256
self.app.recoverable_node_timeout = 0.01
test_body = b'test' * self.policy.ec_segment_size
ec_stub = make_ec_object_stub(test_body, self.policy, None)
frag_archives = ec_stub['frags']
self.assertEqual(len(frag_archives[0]), 1960)
boundary = b'81eb9c110b32ced5fe'
def make_mime_body(frag_archive):
return b'\r\n'.join([
b'--' + boundary,
b'Content-Type: application/octet-stream',
b'Content-Range: bytes 0-489/1960',
b'',
frag_archive[0:490],
b'--' + boundary,
b'Content-Type: application/octet-stream',
b'Content-Range: bytes 1470-1959/1960',
b'',
frag_archive[1470:],
b'--' + boundary + b'--',
])
obj_resp_bodies = [make_mime_body(fa) for fa
in ec_stub['frags'][:self.policy.ec_ndata + 1]]
headers = {
'Content-Type': b'multipart/byteranges;boundary=' + boundary,
'Content-Length': len(obj_resp_bodies[0]),
'X-Object-Sysmeta-Ec-Content-Length': len(ec_stub['body']),
'X-Object-Sysmeta-Ec-Etag': ec_stub['etag'],
'X-Timestamp': Timestamp(self.ts()).normal,
}
responses = [
StubResponse(206, body, headers, i,
# make the first one slow
slowdown=0.1 if i == 0 else None)
for i, body in enumerate(obj_resp_bodies)
]
def get_response(req):
# there's some math going on here I don't quite understand, the
# fragment_size is 490 and there's like 4 of them because ec_body
# is 'test' * segment_size
self.assertEqual(req['headers']['Range'], 'bytes=0-489,1470-1959')
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=1000-2000,14000-15000'})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 206)
self.assertEqual(len(log), self.policy.ec_ndata + 1)
resp_boundary = resp.headers['content-type'].rsplit('=', 1)[1].encode()
expected = b'\r\n'.join([
b'--' + resp_boundary,
b'Content-Type: application/octet-stream',
b'Content-Range: bytes 1000-2000/16384',
b'',
ec_stub['body'][1000:2001],
b'--' + resp_boundary,
b'Content-Type: application/octet-stream',
b'Content-Range: bytes 14000-15000/16384',
b'',
ec_stub['body'][14000:15001],
b'--' + resp_boundary + b'--',
])
self.assertEqual(resp.body, expected)
def test_GET_with_multirange_unable_to_resume(self):
self.app.object_chunk_size = 256
self.app.recoverable_node_timeout = 0.01
test_body = b'test' * self.policy.ec_segment_size
ec_stub = make_ec_object_stub(test_body, self.policy, None)
frag_archives = ec_stub['frags']
self.assertEqual(len(frag_archives[0]), 1960)
boundary = b'81eb9c110b32ced5fe'
def make_mime_body(frag_archive):
return b'\r\n'.join([
b'--' + boundary,
b'Content-Type: application/octet-stream',
b'Content-Range: bytes 0-489/1960',
b'',
frag_archive[0:490],
b'--' + boundary,
b'Content-Type: application/octet-stream',
b'Content-Range: bytes 1470-1959/1960',
b'',
frag_archive[1470:],
b'--' + boundary + b'--',
])
obj_resp_bodies = [make_mime_body(fa) for fa
# no extra good responses
in ec_stub['frags'][:self.policy.ec_ndata]]
headers = {
'Content-Type': b'multipart/byteranges;boundary=' + boundary,
'Content-Length': len(obj_resp_bodies[0]),
'X-Object-Sysmeta-Ec-Content-Length': len(ec_stub['body']),
'X-Object-Sysmeta-Ec-Etag': ec_stub['etag'],
'X-Timestamp': Timestamp(self.ts()).normal,
}
responses = [
StubResponse(206, body, headers, i,
# make the first one slow
slowdown=0.1 if i == 0 else None)
for i, body in enumerate(obj_resp_bodies)
]
def get_response(req):
# there's some math going on here I don't quite understand, the
# fragment_size is 490 and there's like 4 of them because ec_body
# is 'test' * segment_size
self.assertEqual(req['headers']['Range'], 'bytes=0-489,1470-1959')
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=1000-2000,14000-15000'})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 500)
self.assertEqual(len(log), self.policy.ec_n_unique_fragments * 2)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertEqual(2, len(log_lines), log_lines)
self.assertIn('Trying to read during GET: ChunkReadTimeout',
log_lines[0])
# not the most graceful ending
self.assertIn('Unhandled exception in request: ChunkReadTimeout',
log_lines[1])
def test_GET_with_multirange_short_resume_body(self):
self.app.object_chunk_size = 256
self.app.recoverable_node_timeout = 0.01
test_body = b'test' * self.policy.ec_segment_size
ec_stub = make_ec_object_stub(test_body, self.policy, None)
frag_archives = ec_stub['frags']
self.assertEqual(len(frag_archives[0]), 1960)
boundary = b'81eb9c110b32ced5fe'
def make_mime_body(frag_archive):
return b'\r\n'.join([
b'--' + boundary,
b'Content-Type: application/octet-stream',
b'Content-Range: bytes 0-489/1960',
b'',
frag_archive[0:490],
b'--' + boundary,
b'Content-Type: application/octet-stream',
b'Content-Range: bytes 1470-1959/1960',
b'',
frag_archive[1470:],
b'--' + boundary + b'--',
])
obj_resp_bodies = [make_mime_body(fa) for fa
# no extra good responses
in ec_stub['frags'][:self.policy.ec_ndata]]
headers = {
'Content-Type': b'multipart/byteranges;boundary=' + boundary,
'Content-Length': len(obj_resp_bodies[0]),
'X-Object-Sysmeta-Ec-Content-Length': len(ec_stub['body']),
'X-Object-Sysmeta-Ec-Etag': ec_stub['etag'],
'X-Timestamp': Timestamp(self.ts()).normal,
}
responses = [
StubResponse(206, body, headers, i,
# make the first one slow
slowdown=0.1 if i == 0 else None)
for i, body in enumerate(obj_resp_bodies)
]
# add a short read response for the resume
short_body = obj_resp_bodies[0][:512]
responses.append(StubResponse(206, short_body, headers, 0))
def get_response(req):
# there's some math going on here I don't quite understand, the
# fragment_size is 490 and there's like 4 of them because ec_body
# is 'test' * segment_size
self.assertEqual(req['headers']['Range'], 'bytes=0-489,1470-1959')
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=1000-2000,14000-15000'})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
resp_boundary = resp.headers['content-type'].rsplit(
'=', 1)[1].encode()
expected = b'\r\n'.join([
b'--' + resp_boundary,
b'Content-Type: application/octet-stream',
b'Content-Range: bytes 1000-2000/16384',
b'',
b'',
b'--' + resp_boundary + b'--',
])
self.assertEqual(expected, resp.body)
self.assertEqual(resp.status_int, 206)
self.assertEqual(len(log), self.policy.ec_n_unique_fragments * 2)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertIn("Trying to read next part of EC multi-part "
"GET (retrying)", log_lines[0])
# not the most graceful ending
self.assertIn("Exception fetching fragments for '/a/c/o'",
log_lines[-1])
def test_GET_with_success_and_507_will_503(self):
responses = [ # only 9 good nodes
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
]
def get_response(req):
# bad disk on all other nodes
return responses.pop(0) if responses else StubResponse(507)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
self.assertEqual(len(log), 2 * self.replicas())
def test_GET_with_success_and_404_will_404(self):
responses = [ # only 9 good nodes
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
]
def get_response(req):
# no frags on other nodes
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.assertEqual(len(log), 2 * self.replicas())
def test_ranged_get(self):
self.app.object_chunk_size = 256
segment_size = self.policy.ec_segment_size
frag_size = self.policy.fragment_size
data = (b'test' * segment_size)[:-492]
etag = md5(data).hexdigest()
archives = self._make_ec_archive_bodies(data)
frag_archive_size = len(archives[0])
range_size = frag_size * 2
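        # the client range 3000-5000 spans the first two segments, so each
        # backend GET should ask for the first two whole fragments, i.e.
        # bytes 0 to range_size - 1 (asserted via obj_req_ranges below)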
headers = {
'Content-Type': 'text/plain',
'Content-Length': range_size,
'Content-Range': 'bytes 0-%s/%s' % (range_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(data),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': Timestamp(self.ts()).internal
}
responses = [
StubResponse(206, body[:range_size], headers, i)
for i, body in enumerate(archives[:self.policy.ec_ndata])
]
obj_req_ranges = set()
def get_response(req):
obj_req_ranges.add(req['headers']['Range'])
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=3000-5000'})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(obj_req_ranges, {'bytes=0-%s' % (range_size - 1)})
self.assertEqual(resp.status_int, 206)
self.assertEqual(resp.headers['Content-Range'],
'bytes 3000-5000/%s' % len(data))
self.assertEqual(resp.body, data[3000:5001])
self.assertEqual(len(log), self.policy.ec_ndata)
def test_ranged_get_with_slow_resp(self):
self.app.object_chunk_size = 256
self.app.recoverable_node_timeout = 0.01
segment_size = self.policy.ec_segment_size
frag_size = self.policy.fragment_size
data = (b'test' * segment_size)[:-492]
etag = md5(data).hexdigest()
archives = self._make_ec_archive_bodies(data)
frag_archive_size = len(archives[0])
range_size = frag_size * 2
headers = {
'Content-Type': 'text/plain',
'Content-Length': range_size,
'Content-Range': 'bytes 0-%s/%s' % (range_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(data),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': Timestamp(self.ts()).internal
}
responses = [
StubResponse(206, body[:range_size], headers, i,
# the first body comes up slow
slowdown=0.1 if i == 0 else None)
for i, body in enumerate(archives[:self.policy.ec_ndata])
]
responses.append(StubResponse(
206, archives[self.policy.ec_ndata][:range_size],
headers, self.policy.ec_ndata))
obj_req_ranges = set()
def get_response(req):
obj_req_ranges.add(req['headers']['Range'])
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=3000-5000'})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.body, data[3000:5001])
self.assertEqual(resp.status_int, 206)
self.assertEqual(obj_req_ranges, {'bytes=0-%s' % (range_size - 1)})
self.assertEqual(resp.headers['Content-Range'],
'bytes 3000-5000/%s' % len(data))
self.assertEqual(len(log), self.policy.ec_ndata + 1)
def test_ranged_get_with_short_resp(self):
self.app.object_chunk_size = 256
segment_size = self.policy.ec_segment_size
frag_size = self.policy.fragment_size
data = (b'test' * segment_size)[:-492]
etag = md5(data).hexdigest()
archives = self._make_ec_archive_bodies(data)
frag_archive_size = len(archives[0])
range_size = frag_size * 2
headers = {
'Content-Type': 'text/plain',
'Content-Length': range_size,
'Content-Range': 'bytes 0-%s/%s' % (range_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(data),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': Timestamp(self.ts()).internal
}
responses = [
StubResponse(
206,
# the first body comes up short
body[:frag_size] if i == 0 else body[:range_size],
headers, i)
for i, body in enumerate(archives[:self.policy.ec_ndata])
]
responses.append(StubResponse(
206, archives[self.policy.ec_ndata][frag_size:range_size], {
'Content-Type': 'text/plain',
'Content-Length': frag_size,
'Content-Range': 'bytes %s-%s/%s' % (
frag_size, range_size - 1, frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(data),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': Timestamp(self.ts()).internal,
}, self.policy.ec_ndata))
obj_req_ranges = []
def get_response(req):
obj_req_ranges.append(req['headers']['Range'])
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=3000-5000'})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.body, data[3000:5001])
self.assertEqual(resp.status_int, 206)
self.assertEqual(obj_req_ranges,
['bytes=0-%s' % (range_size - 1)] *
self.policy.ec_ndata +
['bytes=%s-%s' % (frag_size, range_size - 1)])
self.assertEqual(resp.headers['Content-Range'],
'bytes 3000-5000/%s' % len(data))
self.assertEqual(len(log), self.policy.ec_ndata + 1)
def test_ranged_get_with_short_resp_timeout(self):
self.app.object_chunk_size = 256
self.app.recoverable_node_timeout = 0.01
segment_size = self.policy.ec_segment_size
frag_size = self.policy.fragment_size
data = (b'test' * segment_size)[:-492]
etag = md5(data).hexdigest()
archives = self._make_ec_archive_bodies(data)
frag_archive_size = len(archives[0])
range_size = frag_size * 2
headers = {
'Content-Type': 'text/plain',
'Content-Length': range_size,
'Content-Range': 'bytes 0-%s/%s' % (range_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(data),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': Timestamp(self.ts()).internal
}
responses = [
StubResponse(
206, body[:range_size], headers, i,
                # the first body slows down after a while
slowdown=[None] * 3 + [0.1] if i == 0 else None)
for i, body in enumerate(archives[:self.policy.ec_ndata])
]
responses.append(StubResponse(
206, archives[self.policy.ec_ndata][frag_size:range_size], {
'Content-Type': 'text/plain',
'Content-Length': frag_size,
'Content-Range': 'bytes %s-%s/%s' % (
frag_size, range_size - 1, frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(data),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': Timestamp(self.ts()).internal,
}, self.policy.ec_ndata))
obj_req_ranges = []
def get_response(req):
obj_req_ranges.append(req['headers']['Range'])
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=3000-5000'})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.body, data[3000:5001])
self.assertEqual(resp.status_int, 206)
self.assertEqual(['bytes=0-%s' % (range_size - 1)] *
self.policy.ec_ndata +
['bytes=%s-%s' % (frag_size, range_size - 1)],
obj_req_ranges)
self.assertEqual(resp.headers['Content-Range'],
'bytes 3000-5000/%s' % len(data))
self.assertEqual(len(log), self.policy.ec_ndata + 1)
def test_GET_mixed_ranged_responses_success(self):
segment_size = self.policy.ec_segment_size
frag_size = self.policy.fragment_size
new_data = (b'test' * segment_size)[:-492]
new_etag = md5(new_data, usedforsecurity=False).hexdigest()
new_archives = self._make_ec_archive_bodies(new_data)
old_data = (b'junk' * segment_size)[:-492]
old_etag = md5(old_data, usedforsecurity=False).hexdigest()
old_archives = self._make_ec_archive_bodies(old_data)
frag_archive_size = len(new_archives[0])
        # here we deliberately omit X-Backend-Data-Timestamp to check that
        # the proxy will tolerate responses from object servers that have not
        # been upgraded to send that header
old_headers = {
'Content-Type': 'text/plain',
'Content-Length': frag_size,
'Content-Range': 'bytes 0-%s/%s' % (frag_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(old_data),
'X-Object-Sysmeta-Ec-Etag': old_etag,
'X-Backend-Timestamp': Timestamp(self.ts()).internal
}
new_headers = {
'Content-Type': 'text/plain',
'Content-Length': frag_size,
'Content-Range': 'bytes 0-%s/%s' % (frag_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(new_data),
'X-Object-Sysmeta-Ec-Etag': new_etag,
'X-Backend-Timestamp': Timestamp(self.ts()).internal
}
# 7 primaries with stale frags, 3 handoffs failed to get new frags
responses = [
StubResponse(206, old_archives[0][:frag_size], old_headers, 0),
StubResponse(206, new_archives[1][:frag_size], new_headers, 1),
StubResponse(206, old_archives[2][:frag_size], old_headers, 2),
StubResponse(206, new_archives[3][:frag_size], new_headers, 3),
StubResponse(206, old_archives[4][:frag_size], old_headers, 4),
StubResponse(206, new_archives[5][:frag_size], new_headers, 5),
StubResponse(206, old_archives[6][:frag_size], old_headers, 6),
StubResponse(206, new_archives[7][:frag_size], new_headers, 7),
StubResponse(206, old_archives[8][:frag_size], old_headers, 8),
StubResponse(206, new_archives[9][:frag_size], new_headers, 9),
StubResponse(206, old_archives[10][:frag_size], old_headers, 10),
StubResponse(206, new_archives[11][:frag_size], new_headers, 11),
StubResponse(206, old_archives[12][:frag_size], old_headers, 12),
StubResponse(206, new_archives[13][:frag_size], new_headers, 13),
StubResponse(206, new_archives[0][:frag_size], new_headers, 0),
StubResponse(404),
StubResponse(404),
StubResponse(206, new_archives[6][:frag_size], new_headers, 6),
StubResponse(404),
StubResponse(206, new_archives[10][:frag_size], new_headers, 10),
StubResponse(206, new_archives[12][:frag_size], new_headers, 12),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
closed_conn = defaultdict(set)
for conn in log:
etag = conn.resp.headers.get('X-Object-Sysmeta-Ec-Etag')
closed_conn[etag].add(conn.closed)
self.assertEqual({
old_etag: {True},
new_etag: {False},
None: {True},
}, dict(closed_conn))
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, new_data[:segment_size])
self.assertEqual(len(log), self.policy.ec_ndata + 10)
def test_GET_mismatched_fragment_archives(self):
segment_size = self.policy.ec_segment_size
test_data1 = (b'test' * segment_size)[:-333]
# N.B. the object data *length* here is different
test_data2 = (b'blah1' * segment_size)[:-333]
etag1 = md5(test_data1, usedforsecurity=False).hexdigest()
etag2 = md5(test_data2, usedforsecurity=False).hexdigest()
ec_archive_bodies1 = self._make_ec_archive_bodies(test_data1)
ec_archive_bodies2 = self._make_ec_archive_bodies(test_data2)
headers1 = {'X-Object-Sysmeta-Ec-Etag': etag1,
'X-Object-Sysmeta-Ec-Content-Length': '333'}
# here we're going to *lie* and say the etag here matches
headers2 = {'X-Object-Sysmeta-Ec-Etag': etag1,
'X-Object-Sysmeta-Ec-Content-Length': '333'}
responses1 = [(200, body, self._add_frag_index(fi, headers1))
for fi, body in enumerate(ec_archive_bodies1)]
responses2 = [(200, body, self._add_frag_index(fi, headers2))
for fi, body in enumerate(ec_archive_bodies2)]
req = swob.Request.blank('/v1/a/c/o')
orig_decode = self.policy.pyeclib_driver.decode
captured_fragments = []
def mock_decode(fragments):
captured_fragments.append(fragments)
return orig_decode(fragments)
# sanity check responses1 and capture frag lengths
responses = responses1[:self.policy.ec_ndata]
status_codes, body_iter, headers = zip(*responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
with mock.patch.object(
self.policy.pyeclib_driver, 'decode', mock_decode):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
# read body while decode is mocked
self.assertEqual(
md5(resp.body, usedforsecurity=False).hexdigest(),
etag1)
fragment_lengths1 = [[len(frag) for frag in frags]
for frags in captured_fragments]
self.assertEqual( # sanity check
len(ec_archive_bodies1[0]),
sum([length for length in [lengths[0]
for lengths in fragment_lengths1]]))
# sanity check responses2 and capture frag lengths
captured_fragments = []
responses = responses2[:self.policy.ec_ndata]
status_codes, body_iter, headers = zip(*responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
with mock.patch.object(
self.policy.pyeclib_driver, 'decode', mock_decode):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
# read body while decode is mocked
self.assertEqual(
md5(resp.body, usedforsecurity=False).hexdigest(),
etag2)
fragment_lengths2 = [[len(frag) for frag in frags]
for frags in captured_fragments]
self.assertEqual( # sanity check
len(ec_archive_bodies2[0]),
sum([length for length in [lengths[0]
for lengths in fragment_lengths2]]))
# now mix the responses a bit
mix_index = random.randint(0, self.policy.ec_ndata - 1)
mixed_responses = responses1[:self.policy.ec_ndata]
mixed_responses[mix_index] = responses2[mix_index]
num_segments = len(fragment_lengths1)
mixed_lengths = fragment_lengths1[num_segments - 1]
mixed_lengths[mix_index] = fragment_lengths2[
num_segments - 1][mix_index]
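        # mixed_lengths now describes the per-fragment lengths of the final
        # segment with one length swapped in from the other object; those
        # lengths should appear in the decode error logged below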
status_codes, body_iter, headers = zip(*mixed_responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
try:
resp.body
except ECDriverError:
resp._app_iter.close()
else:
self.fail('invalid ec fragment response body did not blow up!')
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines))
msg = error_lines[0]
self.assertIn('Error decoding fragments', msg)
self.assertIn('/a/c/o', msg)
self.assertIn('Segments decoded: %d' % (num_segments - 1), msg)
self.assertIn(
"[%s]" % ", ".join([str(length) for length in mixed_lengths]), msg)
self.assertIn("Invalid fragment payload in ECPyECLibDriver.decode",
msg)
def test_GET_read_timeout(self):
# verify EC GET behavior when initial batch of nodes time out then
# remaining primary nodes also time out and handoffs return 404
segment_size = self.policy.ec_segment_size
test_data = (b'test' * segment_size)[:-333]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
headers = {'X-Object-Sysmeta-Ec-Etag': etag}
self.app.recoverable_node_timeout = 0.01
responses = [
(200, SlowBody(body, 0.1), self._add_frag_index(i, headers))
for i, body in enumerate(ec_archive_bodies)
] * self.policy.ec_duplication_factor
req = swob.Request.blank('/v1/a/c/o')
status_codes, body_iter, headers = zip(*responses + [
(404, [b''], {}) for i in range(
self.policy.object_ring.max_more_nodes)])
with mocked_http_conn(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
# do this inside the fake http context manager, it'll try to
# resume but won't be able to give us all the right bytes
self.assertNotEqual(
md5(resp.body, usedforsecurity=False).hexdigest(),
etag)
error_lines = self.logger.get_lines_for_level('error')
nparity = self.policy.ec_nparity
self.assertGreater(len(error_lines), nparity)
for line in error_lines[:nparity]:
self.assertIn('retrying', line)
for line in error_lines[nparity:]:
self.assertIn('ChunkReadTimeout (0.01s', line)
for line in self.logger.logger.records['ERROR']:
self.assertIn(req.headers['x-trans-id'], line)
def test_GET_write_timeout(self):
# verify EC GET behavior when there's a timeout sending decoded frags
# via the queue.
segment_size = self.policy.ec_segment_size
test_data = (b'test' * segment_size)[:-333]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
headers = {'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Content-Length': '333'}
ndata = self.policy.ec_ndata
responses = [
(200, body, self._add_frag_index(i, headers))
for i, body in enumerate(ec_archive_bodies[:ndata])
] * self.policy.ec_duplication_factor
req = swob.Request.blank('/v1/a/c/o')
status_codes, body_iter, headers = zip(*responses)
self.app.client_timeout = 0.01
with mocked_http_conn(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
resp_body = next(resp.app_iter)
sleep(0.5) # lazy client
# remaining resp truncated
resp_body += b''.join(resp.app_iter)
# we log errors
log_lines = self.app.logger.get_lines_for_level('error')
self.assertTrue(log_lines)
for line in log_lines:
self.assertIn('ChunkWriteTimeout feeding fragments', line)
# client gets a short read
self.assertEqual(16051, len(test_data))
self.assertEqual(8192, len(resp_body))
self.assertNotEqual(
md5(resp_body, usedforsecurity=False).hexdigest(),
etag)
def test_GET_read_timeout_retrying_but_no_more_useful_nodes(self):
# verify EC GET behavior when initial batch of nodes time out then
# remaining nodes either return 404 or return data for different etag
segment_size = self.policy.ec_segment_size
test_data = (b'test' * segment_size)[:-333]
etag = md5(test_data).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
headers = {'X-Object-Sysmeta-Ec-Etag': etag}
self.app.recoverable_node_timeout = 0.01
ndata = self.policy.ec_ndata
# only ndata responses, all of which have SlowBody
responses = [
(200, SlowBody(body, 0.1), self._add_frag_index(i, headers))
for i, body in enumerate(ec_archive_bodies[:ndata])
] * self.policy.ec_duplication_factor
# 2 primaries return 404
responses += [
(404, '', {}), (404, '', {})
] * self.policy.ec_duplication_factor
# 2 primaries return different etag
headers2 = {'X-Object-Sysmeta-Ec-Etag': 'other_etag'}
responses += [
(200, body, self._add_frag_index(i, headers2))
for i, body in enumerate(ec_archive_bodies[ndata + 2:])
] * self.policy.ec_duplication_factor
req = swob.Request.blank('/v1/a/c/o')
# all other (handoff) responses are 404
status_codes, body_iter, headers = zip(*responses + [
(404, [b''], {}) for i in range(
self.policy.object_ring.max_more_nodes)])
with mocked_http_conn(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
# do this inside the fake http context manager, it'll try to
# resume but won't be able to give us all the right bytes
self.assertNotEqual(md5(resp.body).hexdigest(), etag)
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(ndata, len(error_lines))
for line in error_lines:
self.assertIn('ChunkReadTimeout (0.01s', line)
for line in self.logger.logger.records['ERROR']:
self.assertIn(req.headers['x-trans-id'], line)
debug_lines = self.logger.get_lines_for_level('debug')
nparity = self.policy.ec_nparity
nhandoffs = self.policy.object_ring.max_more_nodes
ignore_404 = ignore_404_handoff = 0
for line in debug_lines:
if 'Ignoring 404 from primary' in line:
ignore_404 += 1
if 'Ignoring 404 from handoff' in line:
ignore_404_handoff += 1
self.assertEqual(nparity - 2, ignore_404, debug_lines)
self.assertEqual(nhandoffs, ignore_404_handoff, debug_lines)
self.assertEqual(len(debug_lines), ignore_404_handoff + ignore_404)
self.assertEqual(self.logger.get_lines_for_level('warning'), [
'Skipping source (etag mismatch: got other_etag, '
'expected %s)' % etag] * 2)
def test_GET_read_timeout_resume(self):
segment_size = self.policy.ec_segment_size
test_data = (b'test' * segment_size)[:-333]
etag = md5(test_data, usedforsecurity=False).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
headers = {
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Content-Length': len(test_data),
}
self.app.recoverable_node_timeout = 0.05
# first one is slow
responses = [(200, SlowBody(ec_archive_bodies[0], 0.1),
self._add_frag_index(0, headers))]
# ... the rest are fine
responses += [(200, body, self._add_frag_index(i, headers))
for i, body in enumerate(ec_archive_bodies[1:], start=1)]
req = swob.Request.blank('/v1/a/c/o')
status_codes, body_iter, headers = zip(
*responses[:self.policy.ec_ndata + 1])
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(
md5(resp.body, usedforsecurity=False).hexdigest(),
etag)
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines))
self.assertIn('retrying', error_lines[0])
for line in self.logger.logger.records['ERROR']:
self.assertIn(req.headers['x-trans-id'], line)
def test_GET_read_timeout_fails(self):
segment_size = self.policy.ec_segment_size
test_data = (b'test' * segment_size)[:-333]
etag = md5(test_data).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
headers = {
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Content-Length': len(test_data),
}
self.app.recoverable_node_timeout = 0.05
# first one is slow
responses = [(200, SlowBody(ec_archive_bodies[0], 0.1),
self._add_frag_index(0, headers))]
# ... the rest are fine
responses += [(200, body, self._add_frag_index(i, headers))
for i, body in enumerate(ec_archive_bodies[1:], start=1)]
req = swob.Request.blank('/v1/a/c/o')
status_codes, body_iter, headers = zip(
*responses[:self.policy.ec_ndata])
# I don't know why fast_forward would blow up, but if it does we
# re-raise the ChunkReadTimeout and still want a txn-id
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers), \
mock.patch(
'swift.proxy.controllers.obj.ECFragGetter.fast_forward',
side_effect=ValueError()):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertNotEqual(md5(resp.body).hexdigest(), etag)
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(2, len(error_lines))
self.assertIn('Unable to fast forward', error_lines[0])
self.assertIn('Timeout fetching', error_lines[1])
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_lines))
self.assertIn(
'Un-recoverable fragment rebuild. Only received 9/10 fragments',
warning_lines[0])
for line in self.logger.logger.records['ERROR'] + \
self.logger.logger.records['WARNING']:
self.assertIn(req.headers['x-trans-id'], line)
def test_GET_one_short_fragment_archive(self):
# verify that a warning is logged when one fragment archive returns
        # fewer whole fragments than the others
segment_size = self.policy.ec_segment_size
test_data = (b'test' * segment_size)[:-333]
etag = md5(test_data).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
def do_test(missing_length):
self.logger.clear()
headers = {
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Content-Length': len(test_data),
}
responses = [(200, ec_archive_bodies[0][:(-1 * missing_length)],
self._add_frag_index(0, headers))]
# ... the rest are fine
responses += [
(200, body, self._add_frag_index(i, headers))
for i, body in enumerate(ec_archive_bodies[1:], start=1)]
req = swob.Request.blank('/v1/a/c/o')
status_codes, body_iter, headers = zip(
*responses[:self.policy.ec_ndata])
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertNotEqual(md5(resp.body).hexdigest(), etag)
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual([], error_lines)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_lines))
self.assertIn(
'Un-recoverable fragment rebuild. '
'Only received 9/10 fragments', warning_lines[0])
# each fragment archive has 4 fragments of sizes [490, 490, 490, 458];
# try dropping whole fragment(s) from one archive
do_test(458)
do_test(490 + 458)
do_test(490 + 490 + 458)
do_test(490 + 490 + 490 + 458)
def test_GET_trigger_ec_metadata_check_failure(self):
        # verify that an error is logged (and a 500 returned) when only
        # k - 1 valid fragments are received and the EC decode fails
segment_size = self.policy.ec_segment_size
test_data = (b'test' * segment_size)[:-333]
etag = md5(test_data).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
bad_bodies = [b'd' * segment_size] * (self.policy.ec_nparity + 1)
ec_archive_bodies = \
ec_archive_bodies[:self.policy.ec_ndata - 1] + bad_bodies
self.logger.clear()
headers = {
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Content-Length': len(test_data),
}
responses = [
(200, body, self._add_frag_index(i, headers))
for i, body in enumerate(ec_archive_bodies)]
req = swob.Request.blank('/v1/a/c/o')
status_codes, body_iter, headers = zip(
*responses[:self.policy.ec_ndata])
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 500)
self.assertNotEqual(md5(resp.body).hexdigest(), etag)
error_lines = self.logger.get_lines_for_level('error')
expected_log_line = (
"Error decoding fragments for '/a/c/o'. "
"Segments decoded: 0, Lengths: "
"[490, 490, 490, 490, 490, 490, 490, 490, 490, 490]: "
"pyeclib_c_decode ERROR: Fragment integrity check failed. "
"Please inspect syslog for liberasurecode error report.")
self.assertEqual(expected_log_line, error_lines[0])
def test_GET_read_timeout_resume_mixed_etag(self):
segment_size = self.policy.ec_segment_size
test_data2 = (b'blah1' * segment_size)[:-333]
test_data1 = (b'test' * segment_size)[:-333]
etag2 = md5(test_data2, usedforsecurity=False).hexdigest()
etag1 = md5(test_data1, usedforsecurity=False).hexdigest()
ec_archive_bodies2 = self._make_ec_archive_bodies(test_data2)
ec_archive_bodies1 = self._make_ec_archive_bodies(test_data1)
headers2 = {'X-Object-Sysmeta-Ec-Etag': etag2,
'X-Object-Sysmeta-Ec-Content-Length': len(test_data2),
'X-Backend-Timestamp': self.ts().internal}
headers1 = {'X-Object-Sysmeta-Ec-Etag': etag1,
'X-Object-Sysmeta-Ec-Content-Length': len(test_data1),
'X-Backend-Timestamp': self.ts().internal}
responses = [
# 404
(404, [b''], {}),
# etag1
(200, ec_archive_bodies1[1], self._add_frag_index(1, headers1)),
# 404
(404, [b''], {}),
# etag1
(200, SlowBody(ec_archive_bodies1[3], 0.1), self._add_frag_index(
3, headers1)),
# etag2
(200, ec_archive_bodies2[4], self._add_frag_index(4, headers2)),
# etag1
(200, ec_archive_bodies1[5], self._add_frag_index(5, headers1)),
# etag2
(200, ec_archive_bodies2[6], self._add_frag_index(6, headers2)),
# etag1
(200, ec_archive_bodies1[7], self._add_frag_index(7, headers1)),
# etag2
(200, ec_archive_bodies2[8], self._add_frag_index(8, headers2)),
# etag1
(200, SlowBody(ec_archive_bodies1[9], 0.1), self._add_frag_index(
9, headers1)),
# etag2
(200, ec_archive_bodies2[10], self._add_frag_index(10, headers2)),
# etag1
(200, ec_archive_bodies1[11], self._add_frag_index(11, headers1)),
# etag2
(200, ec_archive_bodies2[12], self._add_frag_index(12, headers2)),
# 404
(404, [b''], {}),
# handoffs start here
# etag2
(200, ec_archive_bodies2[0], self._add_frag_index(0, headers2)),
# 404
(404, [b''], {}),
# etag1
(200, ec_archive_bodies1[2], self._add_frag_index(2, headers1)),
# 404
(404, [b''], {}),
# etag1
(200, ec_archive_bodies1[4], self._add_frag_index(4, headers1)),
# etag2
(200, ec_archive_bodies2[1], self._add_frag_index(1, headers2)),
# etag1
(200, ec_archive_bodies1[6], self._add_frag_index(6, headers1)),
# etag2
(200, ec_archive_bodies2[7], self._add_frag_index(7, headers2)),
# etag1
(200, ec_archive_bodies1[8], self._add_frag_index(8, headers1)),
# resume requests start here
# 404
(404, [b''], {}),
# etag2
(200, ec_archive_bodies2[3], self._add_frag_index(3, headers2)),
# 404
(404, [b''], {}),
# etag1
(200, ec_archive_bodies1[10], self._add_frag_index(10, headers1)),
# etag1
(200, ec_archive_bodies1[12], self._add_frag_index(12, headers1)),
]
self.app.recoverable_node_timeout = 0.01
req = swob.Request.blank('/v1/a/c/o')
status_codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*status_codes, body_iter=body_iter,
headers=headers) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(
md5(resp.body, usedforsecurity=False).hexdigest(),
etag1)
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(2, len(error_lines))
for line in error_lines:
self.assertIn('retrying', line)
for line in self.logger.logger.records['ERROR']:
self.assertIn(req.headers['x-trans-id'], line)
etag2_conns = []
for conn in log.responses:
if conn.headers.get('X-Object-Sysmeta-Ec-Etag') == etag2:
etag2_conns.append(conn)
self.assertEqual(
([True] * 8) + [False], # the resumed etag2 doesn't get closed
[conn.closed for conn in etag2_conns])
def test_fix_response_HEAD(self):
headers = {'X-Object-Sysmeta-Ec-Content-Length': '10',
'X-Object-Sysmeta-Ec-Etag': 'foo'}
        # successful HEAD
responses = [(200, b'', headers)]
status_codes, body_iter, headers = zip(*responses)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, b'')
        # 200 OK shows the original object's content length
self.assertEqual(resp.headers['Content-Length'], '10')
self.assertEqual(resp.headers['Etag'], 'foo')
# not found HEAD
responses = [(404, b'', {})] * self.replicas() * 2
status_codes, body_iter, headers = zip(*responses)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
# 404 shows actual response body size (i.e. 0 for HEAD)
self.assertEqual(resp.headers['Content-Length'], '0')
def test_GET_with_invalid_ranges(self):
# real body size is segment_size - 10 (just 1 segment)
segment_size = self.policy.ec_segment_size
real_body = (b'a' * segment_size)[:-10]
# range is out of real body but in segment size
self._test_invalid_ranges('GET', real_body,
segment_size, '%s-' % (segment_size - 10))
# range is out of both real body and segment size
self._test_invalid_ranges('GET', real_body,
segment_size, '%s-' % (segment_size + 10))
def _test_invalid_ranges(self, method, real_body, segment_size, req_range):
# make a request with range starts from more than real size.
body_etag = md5(real_body, usedforsecurity=False).hexdigest()
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method=method,
headers={'Destination': 'c1/o',
'Range': 'bytes=%s' % (req_range)})
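        # build per-node fragment payloads: encode the single-segment body
        # once, repeat the frag set for any duplicated nodes, then transpose
        # so node_fragments[i] holds the fragment payload(s) for node i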
fragments = self.policy.pyeclib_driver.encode(real_body)
fragment_payloads = [fragments * self.policy.ec_duplication_factor]
node_fragments = list(zip(*fragment_payloads))
self.assertEqual(len(node_fragments), self.replicas()) # sanity
headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body)),
'X-Object-Sysmeta-Ec-Etag': body_etag}
start = int(req_range.split('-')[0])
self.assertGreaterEqual(start, 0) # sanity
title, exp = swob.RESPONSE_REASONS[416]
range_not_satisfiable_body = \
'<html><h1>%s</h1><p>%s</p></html>' % (title, exp)
range_not_satisfiable_body = range_not_satisfiable_body.encode('utf-8')
if start >= segment_size:
responses = [(416, range_not_satisfiable_body,
self._add_frag_index(i, headers))
for i in range(POLICIES.default.ec_ndata)]
else:
responses = [(200, b''.join(node_fragments[i]),
self._add_frag_index(i, headers))
for i in range(POLICIES.default.ec_ndata)]
status_codes, body_iter, headers = zip(*responses)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 416)
self.assertEqual(resp.content_length, len(range_not_satisfiable_body))
self.assertEqual(resp.body, range_not_satisfiable_body)
self.assertEqual(resp.etag, body_etag)
self.assertEqual(resp.headers['Accept-Ranges'], 'bytes')
def test_non_durable_ec_response_bucket(self):
ts = self.ts()
bucket = obj.ECGetResponseBucket(self.policy, ts)
self.assertEqual(bucket.shortfall, self.policy.ec_ndata)
for i in range(1, self.policy.ec_ndata - self.policy.ec_nparity + 1):
stub_getter = mock.MagicMock(last_status=200, last_headers={
'X-Backend-Timestamp': ts.internal,
'X-Object-Sysmeta-Ec-Etag': 'the-etag',
'X-Object-Sysmeta-Ec-Frag-Index': str(i),
})
bucket.add_response(stub_getter, None)
self.assertEqual(bucket.shortfall, self.policy.ec_ndata - i)
self.assertEqual(bucket.shortfall, self.policy.ec_nparity)
self.assertFalse(bucket.durable)
expectations = (
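            # expected shortfall after the i-th unique frag index has been
            # added (i = 7..14, matching the zip with range() below):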
4, # 7
4, # 8
4, # 9
4, # 10
3, # 11
2, # 12
1, # 13
1, # 14
)
for i, expected in zip(range(
self.policy.ec_ndata - self.policy.ec_nparity + 1,
self.policy.object_ring.replica_count + 1), expectations):
stub_getter = mock.MagicMock(last_status=200, last_headers={
'X-Backend-Timestamp': ts.internal,
'X-Object-Sysmeta-Ec-Etag': 'the-etag',
'X-Object-Sysmeta-Ec-Frag-Index': str(i),
})
bucket.add_response(stub_getter, None)
msg = 'With %r resp, expected shortfall %s != %s' % (
bucket.gets.keys(), expected, bucket.shortfall)
self.assertEqual(bucket.shortfall, expected, msg)
class TestECFunctions(unittest.TestCase):
def test_chunk_transformer(self):
def do_test(dup_factor, segments):
segment_size = 1024
orig_chunks = []
for i in range(segments):
orig_chunks.append(
chr(i + 97).encode('latin-1') * segment_size)
policy = ECStoragePolicy(0, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(
replicas=10 * dup_factor),
ec_segment_size=segment_size,
ec_duplication_factor=dup_factor)
encoded_chunks = [[] for _ in range(policy.ec_n_unique_fragments)]
for orig_chunk in orig_chunks:
# each segment produces a set of frags
frag_set = policy.pyeclib_driver.encode(orig_chunk)
for frag_index, frag_data in enumerate(frag_set):
encoded_chunks[frag_index].append(frag_data)
# chunk_transformer buffers and concatenates multiple frags
expected = [b''.join(frags) for frags in encoded_chunks]
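            # chunk_transformer is a coroutine: prime it with send(None),
            # feed it client chunks, then send(b'') to flush its buffer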
transform = obj.chunk_transformer(policy)
transform.send(None)
backend_chunks = transform.send(b''.join(orig_chunks))
self.assertIsNotNone(backend_chunks) # sanity
self.assertEqual(
len(backend_chunks), policy.ec_n_unique_fragments)
self.assertEqual(expected, backend_chunks)
# flush out last chunk buffer
backend_chunks = transform.send(b'')
self.assertEqual(
len(backend_chunks), policy.ec_n_unique_fragments)
self.assertEqual([b''] * policy.ec_n_unique_fragments,
backend_chunks)
do_test(dup_factor=1, segments=1)
do_test(dup_factor=2, segments=1)
do_test(dup_factor=3, segments=1)
do_test(dup_factor=1, segments=2)
do_test(dup_factor=2, segments=2)
do_test(dup_factor=3, segments=2)
def test_chunk_transformer_non_aligned_last_chunk(self):
last_chunk = b'a' * 128
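        # a trailing chunk smaller than ec_segment_size should be buffered
        # and only encoded, as its own short final segment, when the
        # transformer is flushed with send(b'')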
def do_test(dup):
policy = ECStoragePolicy(0, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(replicas=10 * dup),
ec_segment_size=1024,
ec_duplication_factor=dup)
expected = policy.pyeclib_driver.encode(last_chunk)
transform = obj.chunk_transformer(policy)
transform.send(None)
transform.send(last_chunk)
# flush out last chunk buffer
backend_chunks = transform.send(b'')
self.assertEqual(
len(backend_chunks), policy.ec_n_unique_fragments)
self.assertEqual(expected, backend_chunks)
do_test(1)
do_test(2)
def test_client_range_to_segment_range(self):
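        # client_range_to_segment_range rounds a client byte range out to
        # whole-segment boundaries, e.g. bytes 100-700 with 512-byte segments
        # touch segments 0 and 1, i.e. segment bytes 0-1023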
actual = obj.client_range_to_segment_range(100, 700, 512)
self.assertEqual(actual, (0, 1023))
self.assertEqual([type(x) for x in actual], [int, int])
actual = obj.client_range_to_segment_range(100, 700, 256)
self.assertEqual(actual, (0, 767))
self.assertEqual([type(x) for x in actual], [int, int])
actual = obj.client_range_to_segment_range(300, None, 256)
self.assertEqual(actual, (256, None))
self.assertEqual([type(x) for x in actual], [int, type(None)])
def test_segment_range_to_fragment_range(self):
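        # segment_range_to_fragment_range scales a segment byte range down to
        # the corresponding fragment byte range, e.g. segment bytes 0-1023
        # with 512-byte segments and 300-byte fragments map to fragment
        # bytes 0-599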
actual = obj.segment_range_to_fragment_range(0, 1023, 512, 300)
self.assertEqual(actual, (0, 599))
self.assertEqual([type(x) for x in actual], [int, int])
actual = obj.segment_range_to_fragment_range(0, 767, 256, 100)
self.assertEqual(actual, (0, 299))
self.assertEqual([type(x) for x in actual], [int, int])
actual = obj.segment_range_to_fragment_range(256, None, 256, 100)
self.assertEqual(actual, (100, None))
self.assertEqual([type(x) for x in actual], [int, type(None)])
@patch_policies([ECStoragePolicy(0, name='ec', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10,
ec_nparity=4, ec_segment_size=4096,
ec_duplication_factor=2),
StoragePolicy(1, name='unu')],
fake_ring_args=[{'replicas': 28}, {}])
class TestECDuplicationObjController(
ECObjectControllerMixin, unittest.TestCase):
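# These tests exercise the EC GET/PUT paths with ec_duplication_factor=2:
# a 10+4 scheme stored on 28 nodes, so each unique fragment index is
# held by two primary nodes.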
container_info = {
'status': 200,
'read_acl': None,
'write_acl': None,
'sync_key': None,
'versions': None,
'storage_policy': '0',
}
controller_cls = obj.ECObjectController
def _test_GET_with_duplication_factor(self, node_frags, obj):
# These are basic GET tests run while all backends are healthy
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj['etag'])
self.assertEqual(
md5(resp.body, usedforsecurity=False).hexdigest(),
obj['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# the backend requests should be >= num_data_fragments
self.assertGreaterEqual(len(log), self.policy.ec_ndata)
# but <= # of replicas
self.assertLessEqual(len(log), self.replicas())
self.assertEqual(len(collected_responses), 1)
etag, frags = list(collected_responses.items())[0]
# the backend requests will stop once enough (ec_ndata) responses are collected
self.assertEqual(
len(frags), self.policy.ec_ndata,
'collected %s frags (expected %s) for etag %s' % (
len(frags), self.policy.ec_ndata, etag))
# TODO: actually "frags" in node_frags is meaning "node_index" right now
# in following tests. Reconsidering the name and semantics change needed.
# Or, just mapping to be correct as frag_index is enough?.
def test_GET_with_duplication_factor(self):
obj = self._make_ec_object_stub()
node_frags = [
{'obj': obj, 'frag': 0},
{'obj': obj, 'frag': 1},
{'obj': obj, 'frag': 2},
{'obj': obj, 'frag': 3},
{'obj': obj, 'frag': 4},
{'obj': obj, 'frag': 5},
{'obj': obj, 'frag': 6},
{'obj': obj, 'frag': 7},
{'obj': obj, 'frag': 8},
{'obj': obj, 'frag': 9},
{'obj': obj, 'frag': 10},
{'obj': obj, 'frag': 11},
{'obj': obj, 'frag': 12},
{'obj': obj, 'frag': 13},
] * 2 # duplicated!
self._test_GET_with_duplication_factor(node_frags, obj)
def test_GET_with_duplication_factor_almost_duplicate_dispersion(self):
obj = self._make_ec_object_stub()
node_frags = [
# the first half of the replicas hold frag indexes 0-6, each twice
{'obj': obj, 'frag': 0},
{'obj': obj, 'frag': 0},
{'obj': obj, 'frag': 1},
{'obj': obj, 'frag': 1},
{'obj': obj, 'frag': 2},
{'obj': obj, 'frag': 2},
{'obj': obj, 'frag': 3},
{'obj': obj, 'frag': 3},
{'obj': obj, 'frag': 4},
{'obj': obj, 'frag': 4},
{'obj': obj, 'frag': 5},
{'obj': obj, 'frag': 5},
{'obj': obj, 'frag': 6},
{'obj': obj, 'frag': 6},
# the second half of the replicas hold frag indexes 7-13, each twice
{'obj': obj, 'frag': 7},
{'obj': obj, 'frag': 7},
{'obj': obj, 'frag': 8},
{'obj': obj, 'frag': 8},
{'obj': obj, 'frag': 9},
{'obj': obj, 'frag': 9},
{'obj': obj, 'frag': 10},
{'obj': obj, 'frag': 10},
{'obj': obj, 'frag': 11},
{'obj': obj, 'frag': 11},
{'obj': obj, 'frag': 12},
{'obj': obj, 'frag': 12},
{'obj': obj, 'frag': 13},
{'obj': obj, 'frag': 13},
]
# ...but it still works!
self._test_GET_with_duplication_factor(node_frags, obj)
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_stop(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
# both obj1 and obj2 have only 9 frags each, which is not enough to decode
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
]
# ... and the rest are 404s; the search is bounded by request_count
# (2 * replicas by default) rather than by max_extra_requests because
# the retries happen in ResumingGetter when the responses are 404s
node_frags += [[]] * (self.replicas() * 2 - len(node_frags))
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# default node_iter will exhaust to the last of handoffs
self.assertEqual(len(log), self.replicas() * 2)
# we have obj1, obj2, and 404 NotFound in collected_responses
self.assertEqual(len(list(collected_responses.keys())), 3)
self.assertIn(obj1['etag'], collected_responses)
self.assertIn(obj2['etag'], collected_responses)
self.assertIn(None, collected_responses)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_many_missed_overwrite_will_need_handoff(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
# primaries
node_frags = [
{'obj': obj2, 'frag': 0},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2}, # missed
{'obj': obj2, 'frag': 3},
{'obj': obj2, 'frag': 4},
{'obj': obj2, 'frag': 5},
{'obj': obj1, 'frag': 6}, # missed
{'obj': obj2, 'frag': 7},
{'obj': obj2, 'frag': 8},
{'obj': obj1, 'frag': 9}, # missed
{'obj': obj1, 'frag': 10}, # missed
{'obj': obj1, 'frag': 11}, # missed
{'obj': obj2, 'frag': 12},
{'obj': obj2, 'frag': 13},
]
node_frags = node_frags * 2  # duplication factor of 2
# so the primaries have indexes 0, 1, 3, 4, 5, 7, 8, 12, 13
# (9 unique indexes) for obj2, and then a handoff provides index 6
node_frags += [
{'obj': obj2, 'frag': 6}, # handoff
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(
md5(resp.body, usedforsecurity=False).hexdigest(),
obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# there's not enough of the obj2 etag on the primaries, we would
# have collected responses for both etags, and would have made
# one more request to the handoff node
self.assertEqual(len(log), self.replicas() + 1)
self.assertEqual(len(collected_responses), 2)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_succeed(self):
obj1 = self._make_ec_object_stub(pattern='obj1',
timestamp=self.ts())
obj2 = self._make_ec_object_stub(pattern='obj2',
timestamp=self.ts())
# 28 nodes are here
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
[],
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
[],
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
[],
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
[],
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
[],
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
[],
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
[],
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
[],
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
[],
[],
]
node_frags += [[]] * 13 # Plus 13 nodes in handoff
# finally the 10th fragment (index 9) for obj2 is found
node_frags += [[{'obj': obj2, 'frag': 9}]]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(
md5(resp.body, usedforsecurity=False).hexdigest(),
obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# we go exactly as long as we have to, finding two different
# etags and some 404's (i.e. collected_responses[None])
self.assertEqual(len(log), len(node_frags))
self.assertEqual(len(collected_responses), 3)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_mixed_frags_and_no_quorum_will_503(self):
# all nodes have a frag but no single etag reaches quorum, so even
# though there is no backend 404 response the proxy cannot serve the
# object and returns 503
stub_objects = [
self._make_ec_object_stub(pattern='obj1'),
self._make_ec_object_stub(pattern='obj2'),
self._make_ec_object_stub(pattern='obj3'),
self._make_ec_object_stub(pattern='obj4'),
self._make_ec_object_stub(pattern='obj5'),
self._make_ec_object_stub(pattern='obj6'),
self._make_ec_object_stub(pattern='obj7'),
]
etags = collections.Counter(stub['etag'] for stub in stub_objects)
self.assertEqual(len(etags), 7, etags) # sanity
# primaries and handoffs for required nodes
# this is the 10+4 * 2 case, so 56 requests (2 * replicas) are required
# before giving up. we prepare 7 different objects above so the
# responses will have 8 fragments for each object
required_nodes = self.replicas() * 2
# fill them out to the primary and handoff nodes
node_frags = []
for frag in range(8):
for stub_obj in stub_objects:
if len(node_frags) >= required_nodes:
# we already have enough responses
break
node_frags.append({'obj': stub_obj, 'frag': frag})
# sanity
self.assertEqual(required_nodes, len(node_frags))
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
collected_etags = set()
collected_status = set()
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
collected_etags.add(etag)
collected_status.add(conn.resp.status)
self.assertEqual(required_nodes, len(log))
self.assertEqual(len(collected_etags), 7)
self.assertEqual({200}, collected_status)
def test_GET_with_no_durable_files(self):
# verify that at least one durable is necessary for a successful GET
obj1 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': False}, # parity
]
node_frags = node_frags * 2 # 2 duplications
node_frags += [[]] * self.replicas() # handoffs
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
# all 28 nodes tried with an optimistic get, none are durable and none
# report having a durable timestamp
self.assertEqual(self.replicas() * 2, len(log))
def test_GET_with_missing_and_mixed_frags_may_503(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
obj3 = self._make_ec_object_stub(pattern='obj3')
obj4 = self._make_ec_object_stub(pattern='obj4')
# we get a 503 when all the handoffs return 200
node_frags = [[]] * self.replicas() # primaries have no frags
# plus, 4 different objects and 7 indexes will be 28 node responses
# here for handoffs
node_frags = node_frags + [ # handoffs all have frags
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{'obj': obj3, 'frag': 0},
{'obj': obj4, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{'obj': obj3, 'frag': 1},
{'obj': obj4, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{'obj': obj3, 'frag': 2},
{'obj': obj4, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{'obj': obj3, 'frag': 3},
{'obj': obj4, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{'obj': obj3, 'frag': 4},
{'obj': obj4, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{'obj': obj3, 'frag': 5},
{'obj': obj4, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
{'obj': obj3, 'frag': 6},
{'obj': obj4, 'frag': 6},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
# never get a quorum so all nodes are searched
self.assertEqual(len(log), 2 * self.replicas())
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), 7)
def test_GET_with_mixed_etags_at_same_timestamp(self):
# the only difference from the parent class test is the handoff stub length
ts = self.ts() # force equal timestamps for two objects
obj1 = self._make_ec_object_stub(timestamp=ts, pattern='obj1')
obj2 = self._make_ec_object_stub(timestamp=ts, pattern='obj2')
self.assertNotEqual(obj1['etag'], obj2['etag']) # sanity
node_frags = [
# 7 frags of obj2 are available and durable
{'obj': obj2, 'frag': 0, 'durable': True},
{'obj': obj2, 'frag': 1, 'durable': True},
{'obj': obj2, 'frag': 2, 'durable': True},
{'obj': obj2, 'frag': 3, 'durable': True},
{'obj': obj2, 'frag': 4, 'durable': True},
{'obj': obj2, 'frag': 5, 'durable': True},
{'obj': obj2, 'frag': 6, 'durable': True},
# 7 frags of obj1 are available and durable
{'obj': obj1, 'frag': 7, 'durable': True},
{'obj': obj1, 'frag': 8, 'durable': True},
{'obj': obj1, 'frag': 9, 'durable': True},
{'obj': obj1, 'frag': 10, 'durable': True},
{'obj': obj1, 'frag': 11, 'durable': True},
{'obj': obj1, 'frag': 12, 'durable': True},
{'obj': obj1, 'frag': 13, 'durable': True},
# handoffs
]
node_frags += [[]] * (self.replicas() * 2 - len(node_frags))
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
# read body to provoke any EC decode errors
self.assertTrue(resp.body)
self.assertEqual(resp.status_int, 503)
self.assertEqual(len(log), self.replicas() * 2)
collected_etags = set()
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
collected_etags.add(etag) # will be None from handoffs
self.assertEqual({obj1['etag'], obj2['etag'], None}, collected_etags)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertEqual(log_lines,
['Problem with fragment response: ETag mismatch'] * 7
+ ['Object returning 503 for []'])
def _test_determine_chunk_destinations_prioritize(
self, missing_two, missing_one):
# This scenario is only likely for ec_duplication_factor >= 2. If we
# have multiple failures such that the putters collection is missing
# two primary nodes for frag index 'missing_two' and missing one
# primary node for frag index 'missing_one', then we should prioritize
# finding a handoff for frag index 'missing_two'.
class FakePutter(object):
def __init__(self, index):
self.node_index = index
controller = self.controller_cls(self.app, 'a', 'c', 'o')
# sanity, caller must set missing_two to less than ec_n_unique_fragments
self.assertLess(missing_two, self.policy.ec_n_unique_fragments)
# create a dummy list of putters, check no handoffs
putters = []
for index in range(self.policy.object_ring.replica_count):
putters.append(FakePutter(index))
# sanity - all putters have primary nodes
got = controller._determine_chunk_destinations(putters, self.policy)
expected = {}
for i, p in enumerate(putters):
expected[p] = self.policy.get_backend_index(i)
self.assertEqual(got, expected)
# now, for fragment index that is missing two copies, lets make one
# putter be a handoff
handoff_putter = putters[missing_two]
handoff_putter.node_index = None
# and then pop another putter for a copy of same fragment index
putters.pop(missing_two + self.policy.ec_n_unique_fragments)
# also pop one copy of a different fragment to make one missing hole
putters.pop(missing_one)
# then determine chunk destinations: we have 26 putters here;
# missing_two frag index is missing two copies; missing_one frag index
# is missing one copy, therefore the handoff node should be assigned to
# missing_two frag index
got = controller._determine_chunk_destinations(putters, self.policy)
# N.B. len(putters) is now len(expected) - 2 because of the two pops above
self.assertEqual(len(putters), len(got))
# sanity, no node index - for handoff putter
self.assertIsNone(handoff_putter.node_index)
self.assertEqual(got[handoff_putter], missing_two)
# sanity, other nodes except handoff_putter have node_index
self.assertTrue(all(
[putter.node_index is not None for putter in got if
putter != handoff_putter]))
def test_determine_chunk_destinations_prioritize_more_missing(self):
# dropping node_index 0 and 14 (both copies of frag 0) plus 1 should work
self._test_determine_chunk_destinations_prioritize(0, 1)
# dropping node_index 1 and 15 (both copies of frag 1) plus 0 should work, too
self._test_determine_chunk_destinations_prioritize(1, 0)
class ECCommonPutterMixin(object):
# EC PUT tests common to both Mime and PUT+POST protocols
expect_headers = {}
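# Each of the following tests simulates a failure while reading the
# client request body and verifies the client-facing response code:
# IOError -> 499, ChunkReadTimeout -> 408, Timeout -> 499, any other
# Exception -> 500.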
def test_PUT_ec_error_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise IOError('error message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body=b'test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_PUT_ec_chunkreadtimeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.ChunkReadTimeout()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body=b'test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 408)
def test_PUT_ec_timeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.Timeout()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body=b'test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_PUT_ec_exception_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise Exception('exception message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body=b'test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 500)
# This is how ECCommonPutterMixin is supposed to be used:
# @patch_policies(with_ec_default=True)
# class TestECObjControllerDoublePutter(BaseObjectControllerMixin,
# ECCommonPutterMixin,
# unittest.TestCase):
# # tests specific to the PUT+POST protocol
#
# def setUp(self):
# super(TestECObjControllerDoublePutter, self).setUp()
# # force use of the DoublePutter class
# self.app.use_put_v1 = True
@patch_policies(with_ec_default=True)
class TestECObjControllerMimePutter(BaseObjectControllerMixin,
ECCommonPutterMixin,
unittest.TestCase):
# tests specific to the older PUT protocol using a MimePutter
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
def setUp(self):
super(TestECObjControllerMimePutter, self).setUp()
# force use of the MimePutter class
self.app.use_put_v1 = False
def test_PUT_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=b'')
codes = [201] * self.replicas()
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_with_body_and_bad_etag(self):
segment_size = self.policy.ec_segment_size
test_body = (b'asdf' * segment_size)[:-10]
codes = [201] * self.replicas()
conns = []
def capture_expect(conn):
# stash the backend connection so we can verify that it is closed
# (no data will be sent)
conns.append(conn)
# send a bad etag in the request headers
headers = {'Etag': 'bad etag'}
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', headers=headers, body=test_body)
with set_http_connect(*codes, expect_headers=self.expect_headers,
give_expect=capture_expect):
resp = req.get_response(self.app)
self.assertEqual(422, resp.status_int)
self.assertEqual(self.replicas(), len(conns))
for conn in conns:
self.assertTrue(conn.closed)
# make the footers callback send the correct etag
footers_callback = make_footers_callback(test_body)
env = {'swift.callback.update_footers': footers_callback}
headers = {'Etag': 'bad etag'}
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', headers=headers, environ=env,
body=test_body)
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(201, resp.status_int)
# make the footers callback send a bad Etag footer
footers_callback = make_footers_callback(b'not the test body')
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', environ=env, body=test_body)
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(422, resp.status_int)
def test_txn_id_logging_ECPUT(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=b'')
self.app.logger.txn_id = req.environ['swift.trans_id'] = 'test-txn-id'
codes = [(100, Timeout(), 503, 503)] * self.replicas()
stdout = StringIO()
with set_http_connect(*codes, expect_headers=self.expect_headers), \
mock.patch('sys.stdout', stdout):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
for line in stdout.getvalue().splitlines():
self.assertIn('test-txn-id', line)
self.assertIn('Trying to get ', stdout.getvalue())
def test_PUT_with_explicit_commit_status(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=b'')
codes = [(100, 100, 201)] * self.replicas()
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_mostly_success(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=b'')
codes = [201] * self.quorum()
codes += [503] * (self.replicas() - len(codes))
random.shuffle(codes)
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_error_commit(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=b'')
codes = [(100, 503, Exception('not used'))] * self.replicas()
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_PUT_mostly_success_commit(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=b'')
codes = [201] * self.quorum()
codes += [(100, 503, Exception('not used'))] * (
self.replicas() - len(codes))
random.shuffle(codes)
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_mostly_error_commit(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=b'')
codes = [(100, 503, Exception('not used'))] * self.quorum()
if isinstance(self.policy, ECStoragePolicy):
codes *= self.policy.ec_duplication_factor
codes += [201] * (self.replicas() - len(codes))
random.shuffle(codes)
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_PUT_commit_timeout(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=b'')
codes = [201] * (self.replicas() - 1)
codes.append((100, Timeout(), Exception('not used')))
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_commit_exception(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=b'')
codes = [201] * (self.replicas() - 1)
codes.append((100, Exception('kaboom!'), Exception('not used')))
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_with_body(self):
self._test_PUT_with_body()
def test_PUT_with_chunked_body(self):
self._test_PUT_with_body(chunked=True, content_length=False)
def test_PUT_with_both_body(self):
self._test_PUT_with_body(chunked=True, content_length=True)
def _test_PUT_with_body(self, chunked=False, content_length=True):
segment_size = self.policy.ec_segment_size
test_body = (b'asdf' * segment_size)[:-10]
# make the footers callback not include Etag footer so that we can
# verify that the correct EC-calculated Etag is included in footers
# sent to backend
footers_callback = make_footers_callback()
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', environ=env)
etag = md5(test_body, usedforsecurity=False).hexdigest()
size = len(test_body)
req.body = test_body
if chunked:
req.headers['Transfer-Encoding'] = 'chunked'
if not content_length:
del req.headers['Content-Length']
codes = [201] * self.replicas()
resp_headers = {
'Some-Other-Header': 'Four',
'Etag': 'ignored',
}
put_requests = defaultdict(lambda: {'boundary': None, 'chunks': []})
def capture_body(conn, chunk):
put_requests[conn.connection_id]['chunks'].append(chunk)
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
conn_id = kwargs['connection_id']
put_requests[conn_id]['boundary'] = headers[
'X-Backend-Obj-Multipart-Mime-Boundary']
put_requests[conn_id]['backend-content-length'] = headers.get(
'X-Backend-Obj-Content-Length')
put_requests[conn_id]['x-timestamp'] = headers[
'X-Timestamp']
with set_http_connect(*codes, expect_headers=self.expect_headers,
give_send=capture_body,
give_connect=capture_headers,
headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
timestamps = {captured_req['x-timestamp']
for captured_req in put_requests.values()}
self.assertEqual(1, len(timestamps), timestamps)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': '0',
'Last-Modified': time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(math.ceil(float(timestamps.pop())))),
'Etag': etag,
})
frag_archives = []
for connection_id, info in put_requests.items():
body = unchunk_body(b''.join(info['chunks']))
self.assertIsNotNone(info['boundary'],
"didn't get boundary for conn %r" % (
connection_id,))
# email.parser.FeedParser doesn't know how to take a multipart
# message and boundary together and parse it; it only knows how
# to take a string, parse the headers, and figure out the
# boundary on its own.
parser = EmailFeedParser()
parser.feed(
("Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n" %
info['boundary']).encode('ascii'))
parser.feed(body)
message = parser.close()
self.assertTrue(message.is_multipart()) # sanity check
mime_parts = message.get_payload()
self.assertEqual(len(mime_parts), 3)
obj_part, footer_part, commit_part = mime_parts
# attach the body to frag_archives list
self.assertEqual(obj_part['X-Document'], 'object body')
obj_payload = obj_part.get_payload(decode=True)
frag_archives.append(obj_payload)
if chunked:
self.assertIsNone(info['backend-content-length'])
else:
self.assertTrue(
size > int(info['backend-content-length']) > 0,
"invalid backend-content-length for conn %r" % (
connection_id,))
# assert length was correct for this connection
self.assertEqual(int(info['backend-content-length']),
len(frag_archives[-1]))
# assert length was the same for all connections
self.assertEqual(int(info['backend-content-length']),
len(frag_archives[0]))
# validate some footer metadata
self.assertEqual(footer_part['X-Document'], 'object metadata')
footer_metadata = json.loads(footer_part.get_payload())
self.assertTrue(footer_metadata)
expected = {}
# update expected with footers from the callback...
footers_callback(expected)
expected.update({
'X-Object-Sysmeta-Ec-Content-Length': str(size),
'X-Backend-Container-Update-Override-Size': str(size),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Container-Update-Override-Etag': etag,
'X-Object-Sysmeta-Ec-Segment-Size': str(segment_size),
'Etag': md5(obj_payload, usedforsecurity=False).hexdigest()})
for header, value in expected.items():
self.assertEqual(footer_metadata[header], value)
# sanity on commit message
self.assertEqual(commit_part['X-Document'], 'put commit')
self.assertEqual(len(frag_archives), self.replicas())
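# slice each captured frag archive into fragment_size pieces and decode
# them column-wise to reconstruct the original request body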
fragment_size = self.policy.fragment_size
node_payloads = []
for fa in frag_archives:
payload = [fa[x:x + fragment_size]
for x in range(0, len(fa), fragment_size)]
node_payloads.append(payload)
fragment_payloads = zip(*node_payloads)
expected_body = b''
for fragment_payload in fragment_payloads:
self.assertEqual(len(fragment_payload), self.replicas())
# decode() wants a list of frags rather than the tuple from zip()
fragment_payload = list(fragment_payload)
expected_body += self.policy.pyeclib_driver.decode(
fragment_payload)
self.assertEqual(len(test_body), len(expected_body))
self.assertEqual(test_body, expected_body)
def test_PUT_with_footers(self):
# verify footers supplied by a footers callback being added to
# trailing metadata
segment_size = self.policy.ec_segment_size
test_body = (b'asdf' * segment_size)[:-10]
etag = md5(test_body, usedforsecurity=False).hexdigest()
size = len(test_body)
codes = [201] * self.replicas()
resp_headers = {
'Some-Other-Header': 'Four',
'Etag': 'ignored',
}
def do_test(footers_to_add, expect_added):
put_requests = defaultdict(
lambda: {'boundary': None, 'chunks': []})
def capture_body(conn, chunk):
put_requests[conn.connection_id]['chunks'].append(chunk)
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
conn_id = kwargs['connection_id']
put_requests[conn_id]['boundary'] = headers[
'X-Backend-Obj-Multipart-Mime-Boundary']
put_requests[conn_id]['x-timestamp'] = headers[
'X-Timestamp']
def footers_callback(footers):
footers.update(footers_to_add)
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', environ=env, body=test_body)
with set_http_connect(*codes, expect_headers=self.expect_headers,
give_send=capture_body,
give_connect=capture_headers,
headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
timestamps = {captured_req['x-timestamp']
for captured_req in put_requests.values()}
self.assertEqual(1, len(timestamps), timestamps)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': '0',
'Last-Modified': time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(math.ceil(float(timestamps.pop())))),
'Etag': etag,
})
for connection_id, info in put_requests.items():
body = unchunk_body(b''.join(info['chunks']))
# email.parser.FeedParser doesn't know how to take a multipart
# message and boundary together and parse it; it only knows how
# to take a string, parse the headers, and figure out the
# boundary on its own.
parser = EmailFeedParser()
parser.feed(
("Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n"
% info['boundary']).encode('ascii'))
parser.feed(body)
message = parser.close()
self.assertTrue(message.is_multipart()) # sanity check
mime_parts = message.get_payload()
self.assertEqual(len(mime_parts), 3)
obj_part, footer_part, commit_part = mime_parts
# validate EC footer metadata - should always be present
self.assertEqual(footer_part['X-Document'], 'object metadata')
footer_metadata = json.loads(footer_part.get_payload())
self.assertIsNotNone(
footer_metadata.pop('X-Object-Sysmeta-Ec-Frag-Index'))
expected = {
'X-Object-Sysmeta-Ec-Scheme':
self.policy.ec_scheme_description,
'X-Object-Sysmeta-Ec-Content-Length': str(size),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Segment-Size': str(segment_size),
'Etag': md5(
obj_part.get_payload(decode=True),
usedforsecurity=False).hexdigest()}
expected.update(expect_added)
for header, value in expected.items():
self.assertIn(header, footer_metadata)
self.assertEqual(value, footer_metadata[header])
footer_metadata.pop(header)
self.assertFalse(footer_metadata)
# sanity check - middleware sets no footer, expect EC overrides
footers_to_add = {}
expect_added = {
'X-Backend-Container-Update-Override-Size': str(size),
'X-Backend-Container-Update-Override-Etag': etag}
do_test(footers_to_add, expect_added)
# middleware cannot overwrite any EC sysmeta
footers_to_add = {
'X-Object-Sysmeta-Ec-Content-Length': str(size + 1),
'X-Object-Sysmeta-Ec-Etag': 'other etag',
'X-Object-Sysmeta-Ec-Segment-Size': str(segment_size + 1),
'X-Object-Sysmeta-Ec-Unused-But-Reserved': 'ignored'}
do_test(footers_to_add, expect_added)
# middleware can add x-object-sysmeta- headers including
# x-object-sysmeta-container-update-override headers
footers_to_add = {
'X-Object-Sysmeta-Foo': 'bar',
'X-Object-Sysmeta-Container-Update-Override-Size':
str(size + 1),
'X-Object-Sysmeta-Container-Update-Override-Etag': 'other etag',
'X-Object-Sysmeta-Container-Update-Override-Ping': 'pong'
}
expect_added.update(footers_to_add)
do_test(footers_to_add, expect_added)
# middleware can also overwrite x-backend-container-update-override
# headers
override_footers = {
'X-Backend-Container-Update-Override-Wham': 'bam',
'X-Backend-Container-Update-Override-Size': str(size + 2),
'X-Backend-Container-Update-Override-Etag': 'another etag'}
footers_to_add.update(override_footers)
expect_added.update(override_footers)
do_test(footers_to_add, expect_added)
def test_PUT_old_obj_server(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=b'')
responses = [
# one server will respond to the 100-continue without including the
# required expect headers, and its connection will be dropped
((100, Exception('not used')), {}),
] + [
# and plenty of successful responses too
(201, {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes',
}),
] * self.replicas()
random.shuffle(responses)
if responses[-1][0] != 201:
# whoops, stupid random
responses = responses[1:] + [responses[0]]
codes, expect_headers = zip(*responses)
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_with_slow_commits(self):
# It's important that this timeout be much less than the delay in
# the slow commit responses so that the slow commits are not waited
# for.
self.app.post_quorum_timeout = 0.01
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=b'')
# plenty of slow commits
response_sleep = 5.0
codes = [FakeStatus(201, response_sleep=response_sleep)
for i in range(self.replicas())]
# swap out some with regular fast responses
number_of_fast_responses_needed_to_be_quick_enough = \
self.policy.quorum
fast_indexes = random.sample(
range(self.replicas()),
number_of_fast_responses_needed_to_be_quick_enough)
for i in fast_indexes:
codes[i] = 201
with set_http_connect(*codes, expect_headers=self.expect_headers):
start = time.time()
resp = req.get_response(self.app)
response_time = time.time() - start
self.assertEqual(resp.status_int, 201)
self.assertLess(response_time, response_sleep)
def test_PUT_with_just_enough_durable_responses(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=b'')
codes = [201] * (self.policy.ec_ndata + 1)
codes += [503] * (self.policy.ec_nparity - 1)
self.assertEqual(len(codes), self.policy.ec_n_unique_fragments)
random.shuffle(codes)
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_with_less_durable_responses(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=b'')
codes = [201] * (self.policy.ec_ndata)
codes += [503] * (self.policy.ec_nparity)
self.assertEqual(len(codes), self.policy.ec_n_unique_fragments)
random.shuffle(codes)
with set_http_connect(*codes, expect_headers=self.expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
class TestNumContainerUpdates(unittest.TestCase):
def test_it(self):
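# num_container_updates picks how many of the backend object PUTs carry
# a container update, so that a quorum of container replicas still gets
# updated even when only an object quorum of the PUTs succeed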
test_cases = [
# (container replicas, object replicas, object quorum, expected)
(3, 17, 13, 6), # EC 12+5
(3, 9, 4, 7), # EC 3+6
(3, 14, 11, 5), # EC 10+4
(5, 14, 11, 6), # EC 10+4, 5 container replicas
(7, 14, 11, 7), # EC 10+4, 7 container replicas
(3, 19, 16, 5), # EC 15+4
(5, 19, 16, 6), # EC 15+4, 5 container replicas
(3, 28, 22, 8), # EC (10+4)x2
(5, 28, 22, 9), # EC (10+4)x2, 5 container replicas
(3, 1, 1, 3), # 1 object replica
(3, 2, 1, 3), # 2 object replicas
(3, 3, 2, 3), # 3 object replicas
(3, 4, 2, 4), # 4 object replicas
(3, 5, 3, 4), # 5 object replicas
(3, 6, 3, 5), # 6 object replicas
(3, 7, 4, 5), # 7 object replicas
]
for c_replica, o_replica, o_quorum, exp in test_cases:
c_quorum = utils.quorum_size(c_replica)
got = obj.num_container_updates(c_replica, c_quorum,
o_replica, o_quorum)
self.assertEqual(
exp, got,
"Failed for c_replica=%d, o_replica=%d, o_quorum=%d" % (
c_replica, o_replica, o_quorum))
@patch_policies(with_ec_default=True)
class TestECFragGetter(BaseObjectControllerMixin, unittest.TestCase):
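# Exercises ECFragGetter, which the EC object controller uses to stream
# a single fragment archive from backend nodes, re-chunking the response
# into fragment_size pieces and resuming from another source on failure.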
def setUp(self):
super(TestECFragGetter, self).setUp()
req = Request.blank(path='/a/c/o')
self.getter = obj.ECFragGetter(
self.app, req, None, None, self.policy, 'a/c/o',
{}, None, self.logger.thread_locals,
self.logger)
def test_iter_bytes_from_response_part(self):
part = FileLikeIter([b'some', b'thing'])
it = self.getter._iter_bytes_from_response_part(part, nbytes=None)
self.assertEqual(b'something', b''.join(it))
def test_iter_bytes_from_response_part_insufficient_bytes(self):
part = FileLikeIter([b'some', b'thing'])
it = self.getter._iter_bytes_from_response_part(part, nbytes=100)
with mock.patch.object(self.getter, '_find_source',
return_value=False):
with self.assertRaises(ShortReadError) as cm:
b''.join(it)
self.assertEqual('Too few bytes; read 9, expecting 100',
str(cm.exception))
def test_iter_bytes_from_response_part_read_timeout(self):
part = FileLikeIter([b'some', b'thing'])
self.app.recoverable_node_timeout = 0.05
self.app.client_timeout = 0.8
it = self.getter._iter_bytes_from_response_part(part, nbytes=9)
with mock.patch.object(self.getter, '_find_source',
return_value=False):
with mock.patch.object(part, 'read',
side_effect=[b'some', ChunkReadTimeout(9)]):
with self.assertRaises(ChunkReadTimeout) as cm:
b''.join(it)
self.assertEqual('9 seconds', str(cm.exception))
def test_iter_bytes_from_response_part_small_fragment_size(self):
self.getter.fragment_size = 4
part = FileLikeIter([b'some', b'thing', b''])
it = self.getter._iter_bytes_from_response_part(part, nbytes=None)
self.assertEqual([b'some', b'thin', b'g'], [ch for ch in it])
self.getter.fragment_size = 1
part = FileLikeIter([b'some', b'thing', b''])
it = self.getter._iter_bytes_from_response_part(part, nbytes=None)
self.assertEqual([c.encode() for c in 'something'], [ch for ch in it])
def test_fragment_size(self):
source = FakeSource((
b'abcd', b'1234', b'abc', b'd1', b'234abcd1234abcd1', b'2'))
req = Request.blank('/v1/a/c/o')
def mock_source_gen():
yield GetterSource(self.app, source, {})
self.getter.fragment_size = 8
with mock.patch.object(self.getter, '_source_gen',
mock_source_gen):
it = self.getter.response_parts_iter(req)
fragments = list(next(it)['part_iter'])
self.assertEqual(fragments, [
b'abcd1234', b'abcd1234', b'abcd1234', b'abcd12'])
def test_fragment_size_resuming(self):
node = {'ip': '1.2.3.4', 'port': 6200, 'device': 'sda'}
source1 = FakeSource([b'abcd', b'1234', None,
b'efgh', b'5678', b'lots', b'more', b'data'])
# incomplete reads of fragment_size will be re-fetched
source2 = FakeSource([b'efgh', b'5678', b'lots', None])
source3 = FakeSource([b'lots', b'more', b'data'])
req = Request.blank('/v1/a/c/o')
range_headers = []
sources = [GetterSource(self.app, src, node)
for src in (source1, source2, source3)]
def mock_source_gen():
for source in sources:
range_headers.append(self.getter.backend_headers.get('Range'))
yield source
self.getter.fragment_size = 8
with mock.patch.object(self.getter, '_source_gen',
mock_source_gen):
it = self.getter.response_parts_iter(req)
fragments = list(next(it)['part_iter'])
self.assertEqual(fragments, [
b'abcd1234', b'efgh5678', b'lotsmore', b'data'])
self.assertEqual(range_headers, [None, 'bytes=8-27', 'bytes=16-27'])
def test_fragment_size_resuming_chunked(self):
node = {'ip': '1.2.3.4', 'port': 6200, 'device': 'sda'}
headers = {'transfer-encoding': 'chunked',
'content-type': 'text/plain'}
source1 = FakeSource([b'abcd', b'1234', b'abc', None], headers=headers)
source2 = FakeSource([b'efgh5678'], headers=headers)
range_headers = []
sources = [GetterSource(self.app, src, node)
for src in (source1, source2)]
req = Request.blank('/v1/a/c/o')
def mock_source_gen():
for source in sources:
range_headers.append(self.getter.backend_headers.get('Range'))
yield source
self.getter.fragment_size = 8
with mock.patch.object(self.getter, '_source_gen',
mock_source_gen):
it = self.getter.response_parts_iter(req)
fragments = list(next(it)['part_iter'])
self.assertEqual(fragments, [b'abcd1234', b'efgh5678'])
self.assertEqual(range_headers, [None, 'bytes=8-'])
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/proxy/controllers/test_obj.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from swift.common.swob import Request, Response
from swift.common.middleware.acl import format_acl
from swift.proxy import server as proxy_server
from swift.proxy.controllers.base import headers_to_account_info
from swift.common import constraints
from test.unit import fake_http_connect, FakeRing, mocked_http_conn
from swift.common.storage_policy import StoragePolicy
from swift.common.request_helpers import get_sys_meta_prefix
import swift.proxy.controllers.base
from swift.proxy.controllers.base import get_account_info
from test.unit import patch_policies
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountController(unittest.TestCase):
ACCOUNT_REPLICAS = 3
def setUp(self):
self.app = proxy_server.Application(
None,
account_ring=FakeRing(), container_ring=FakeRing())
def _make_callback_func(self, context):
def callback(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
context['method'] = method
context['path'] = path
context['headers'] = headers or {}
return callback
def _assert_responses(self, method, test_cases):
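# Issue the given method against the account controller once per tuple
# of backend response codes and assert the resulting proxy status.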
if method in ('PUT', 'DELETE'):
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'AUTH_bob')
for responses, expected in test_cases:
with mock.patch(
'swift.proxy.controllers.base.http_connect',
fake_http_connect(*responses)):
req = Request.blank('/v1/AUTH_bob')
resp = getattr(controller, method)(req)
self.assertEqual(expected,
resp.status_int,
'Expected %s but got %s. Failed case: %s' %
(expected, resp.status_int, str(responses)))
def test_account_info_in_response_env(self):
controller = proxy_server.AccountController(self.app, 'AUTH_bob')
with mocked_http_conn(200) as mock_conn:
req = Request.blank('/v1/AUTH_bob')
resp = controller.HEAD(req)
self.assertEqual(2, resp.status_int // 100)
self.assertEqual(['/AUTH_bob'],
# requests are like /sdX/0/..
[r['path'][6:] for r in mock_conn.requests])
info_cache = resp.environ['swift.infocache']
self.assertIn('account/AUTH_bob', info_cache)
header_info = headers_to_account_info(resp.headers)
self.assertEqual(header_info, info_cache['account/AUTH_bob'])
# The failure doesn't lead to cache eviction
errors = [500] * self.ACCOUNT_REPLICAS
with mocked_http_conn(*errors) as mock_conn:
req = Request.blank('/v1/AUTH_bob', {
'PATH_INFO': '/v1/AUTH_bob', 'swift.infocache': info_cache})
resp = controller.HEAD(req)
self.assertEqual(5, resp.status_int // 100)
self.assertEqual(['/AUTH_bob'] * self.ACCOUNT_REPLICAS,
# requests are like /sdX/0/..
[r['path'][6:] for r in mock_conn.requests])
self.assertIs(info_cache, resp.environ['swift.infocache'])
# The *old* header info is all still there
self.assertIn('account/AUTH_bob', info_cache)
self.assertEqual(header_info, info_cache['account/AUTH_bob'])
def test_swift_owner(self):
owner_headers = {
'x-account-meta-temp-url-key': 'value',
'x-account-meta-temp-url-key-2': 'value'}
controller = proxy_server.AccountController(self.app, 'a')
req = Request.blank('/v1/a')
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, headers=owner_headers)):
resp = controller.HEAD(req)
self.assertEqual(2, resp.status_int // 100)
for key in owner_headers:
self.assertNotIn(key, resp.headers)
req = Request.blank('/v1/a', environ={'swift_owner': True})
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, headers=owner_headers)):
resp = controller.HEAD(req)
self.assertEqual(2, resp.status_int // 100)
for key in owner_headers:
self.assertIn(key, resp.headers)
def test_get_deleted_account(self):
resp_headers = {
'x-backend-timestamp': '123.456',
'x-account-status': 'deleted',
}
controller = proxy_server.AccountController(self.app, 'a')
req = Request.blank('/v1/a')
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(404, headers=resp_headers)):
resp = controller.HEAD(req)
self.assertEqual(410, resp.status_int)
def test_long_acct_names(self):
long_acct_name = '%sLongAccountName' % (
'Very' * (constraints.MAX_ACCOUNT_NAME_LENGTH // 4))
controller = proxy_server.AccountController(self.app, long_acct_name)
req = Request.blank('/v1/%s' % long_acct_name)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200)):
resp = controller.HEAD(req)
self.assertEqual(400, resp.status_int)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200)):
resp = controller.GET(req)
self.assertEqual(400, resp.status_int)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200)):
resp = controller.POST(req)
self.assertEqual(400, resp.status_int)
def test_sys_meta_headers_PUT(self):
# check that headers in sys meta namespace make it through
# the proxy controller
sys_meta_key = '%stest' % get_sys_meta_prefix('account')
sys_meta_key = sys_meta_key.title()
user_meta_key = 'X-Account-Meta-Test'
# allow PUTs to account...
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'a')
context = {}
callback = self._make_callback_func(context)
hdrs_in = {sys_meta_key: 'foo',
user_meta_key: 'bar',
'x-timestamp': '1.0'}
req = Request.blank('/v1/a', headers=hdrs_in)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, give_connect=callback)):
controller.PUT(req)
self.assertEqual(context['method'], 'PUT')
self.assertIn(sys_meta_key, context['headers'])
self.assertEqual(context['headers'][sys_meta_key], 'foo')
self.assertIn(user_meta_key, context['headers'])
self.assertEqual(context['headers'][user_meta_key], 'bar')
self.assertNotEqual(context['headers']['x-timestamp'], '1.0')
def test_sys_meta_headers_POST(self):
# check that headers in sys meta namespace make it through
# the proxy controller
sys_meta_key = '%stest' % get_sys_meta_prefix('account')
sys_meta_key = sys_meta_key.title()
user_meta_key = 'X-Account-Meta-Test'
controller = proxy_server.AccountController(self.app, 'a')
context = {}
callback = self._make_callback_func(context)
hdrs_in = {sys_meta_key: 'foo',
user_meta_key: 'bar',
'x-timestamp': '1.0'}
req = Request.blank('/v1/a', headers=hdrs_in)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, give_connect=callback)):
controller.POST(req)
self.assertEqual(context['method'], 'POST')
self.assertIn(sys_meta_key, context['headers'])
self.assertEqual(context['headers'][sys_meta_key], 'foo')
self.assertIn(user_meta_key, context['headers'])
self.assertEqual(context['headers'][user_meta_key], 'bar')
self.assertNotEqual(context['headers']['x-timestamp'], '1.0')
def _make_user_and_sys_acl_headers_data(self):
acl = {
'admin': ['AUTH_alice', 'AUTH_bob'],
'read-write': ['AUTH_carol'],
'read-only': [],
}
user_prefix = 'x-account-' # external, user-facing
user_headers = {(user_prefix + 'access-control'): format_acl(
version=2, acl_dict=acl)}
sys_prefix = get_sys_meta_prefix('account') # internal, system-facing
sys_headers = {(sys_prefix + 'core-access-control'): format_acl(
version=2, acl_dict=acl)}
return user_headers, sys_headers
def test_account_acl_headers_translated_for_GET_HEAD(self):
# Verify that a GET/HEAD which receives X-Account-Sysmeta-Acl-* headers
# from the account server will remap those headers to X-Account-Acl-*
hdrs_ext, hdrs_int = self._make_user_and_sys_acl_headers_data()
controller = proxy_server.AccountController(self.app, 'acct')
for verb in ('GET', 'HEAD'):
req = Request.blank('/v1/acct', environ={'swift_owner': True})
controller.GETorHEAD_base = lambda *_: Response(
headers=hdrs_int, environ={
'PATH_INFO': '/acct',
'REQUEST_METHOD': verb,
})
method = getattr(controller, verb)
resp = method(req)
for header, value in hdrs_ext.items():
if value:
self.assertEqual(resp.headers.get(header), value)
else:
# blank ACLs should result in no header
self.assertNotIn(header, resp.headers)
def test_add_acls_impossible_cases(self):
# For test coverage: verify that defensive coding does defend, in cases
# that shouldn't arise naturally
# add_acls should do nothing if REQUEST_METHOD isn't HEAD/GET/PUT/POST
resp = Response()
controller = proxy_server.AccountController(self.app, 'a')
resp.environ['PATH_INFO'] = '/a'
resp.environ['REQUEST_METHOD'] = 'OPTIONS'
controller.add_acls_from_sys_metadata(resp)
self.assertEqual(1, len(resp.headers)) # we always get Content-Type
self.assertEqual(2, len(resp.environ))
def test_cache_key_impossible_cases(self):
# For test coverage: verify that defensive coding does defend, in cases
# that shouldn't arise naturally
with self.assertRaises(ValueError):
# Container needs account
swift.proxy.controllers.base.get_cache_key(None, 'c')
with self.assertRaises(ValueError):
# Object needs account
swift.proxy.controllers.base.get_cache_key(None, 'c', 'o')
with self.assertRaises(ValueError):
# Object needs container
swift.proxy.controllers.base.get_cache_key('a', None, 'o')
def test_stripping_swift_admin_headers(self):
# Verify that a GET/HEAD which receives privileged headers from the
# account server will strip those headers for non-swift_owners
headers = {
'x-account-meta-harmless': 'hi mom',
'x-account-meta-temp-url-key': 's3kr1t',
}
controller = proxy_server.AccountController(self.app, 'acct')
for verb in ('GET', 'HEAD'):
for env in ({'swift_owner': True}, {'swift_owner': False}):
req = Request.blank('/v1/acct', environ=env)
controller.GETorHEAD_base = lambda *_: Response(
headers=headers, environ={
'PATH_INFO': '/acct',
'REQUEST_METHOD': verb,
})
method = getattr(controller, verb)
resp = method(req)
self.assertEqual(resp.headers.get('x-account-meta-harmless'),
'hi mom')
privileged_header_present = (
'x-account-meta-temp-url-key' in resp.headers)
self.assertEqual(privileged_header_present, env['swift_owner'])
def test_response_code_for_PUT(self):
PUT_TEST_CASES = [
((201, 201, 201), 201),
((201, 201, 404), 201),
((201, 201, 503), 201),
((201, 404, 404), 404),
((201, 404, 503), 503),
((201, 503, 503), 503),
((404, 404, 404), 404),
((404, 404, 503), 404),
((404, 503, 503), 503),
((503, 503, 503), 503)
]
self._assert_responses('PUT', PUT_TEST_CASES)
def test_response_code_for_DELETE(self):
DELETE_TEST_CASES = [
((204, 204, 204), 204),
((204, 204, 404), 204),
((204, 204, 503), 204),
((204, 404, 404), 404),
((204, 404, 503), 503),
((204, 503, 503), 503),
((404, 404, 404), 404),
((404, 404, 503), 404),
((404, 503, 503), 503),
((503, 503, 503), 503)
]
self._assert_responses('DELETE', DELETE_TEST_CASES)
def test_response_code_for_POST(self):
POST_TEST_CASES = [
((204, 204, 204), 204),
((204, 204, 404), 204),
((204, 204, 503), 204),
((204, 404, 404), 404),
((204, 404, 503), 503),
((204, 503, 503), 503),
((404, 404, 404), 404),
((404, 404, 503), 404),
((404, 503, 503), 503),
((503, 503, 503), 503)
]
self._assert_responses('POST', POST_TEST_CASES)
@patch_policies(
[StoragePolicy(0, 'zero', True, object_ring=FakeRing(replicas=4))])
class TestAccountController4Replicas(TestAccountController):
ACCOUNT_REPLICAS = 4
def setUp(self):
self.app = proxy_server.Application(
None,
account_ring=FakeRing(replicas=4),
container_ring=FakeRing(replicas=4))
def test_response_code_for_PUT(self):
PUT_TEST_CASES = [
((201, 201, 201, 201), 201),
((201, 201, 201, 404), 201),
((201, 201, 201, 503), 201),
((201, 201, 404, 404), 201),
((201, 201, 404, 503), 201),
((201, 201, 503, 503), 201),
((201, 404, 404, 404), 404),
((201, 404, 404, 503), 404),
((201, 404, 503, 503), 503),
((201, 503, 503, 503), 503),
((404, 404, 404, 404), 404),
((404, 404, 404, 503), 404),
((404, 404, 503, 503), 404),
((404, 503, 503, 503), 503),
((503, 503, 503, 503), 503)
]
self._assert_responses('PUT', PUT_TEST_CASES)
def test_response_code_for_DELETE(self):
DELETE_TEST_CASES = [
((204, 204, 204, 204), 204),
((204, 204, 204, 404), 204),
((204, 204, 204, 503), 204),
((204, 204, 404, 404), 204),
((204, 204, 404, 503), 204),
((204, 204, 503, 503), 204),
((204, 404, 404, 404), 404),
((204, 404, 404, 503), 404),
((204, 404, 503, 503), 503),
((204, 503, 503, 503), 503),
((404, 404, 404, 404), 404),
((404, 404, 404, 503), 404),
((404, 404, 503, 503), 404),
((404, 503, 503, 503), 503),
((503, 503, 503, 503), 503)
]
self._assert_responses('DELETE', DELETE_TEST_CASES)
def test_response_code_for_POST(self):
POST_TEST_CASES = [
((204, 204, 204, 204), 204),
((204, 204, 204, 404), 204),
((204, 204, 204, 503), 204),
((204, 204, 404, 404), 204),
((204, 204, 404, 503), 204),
((204, 204, 503, 503), 204),
((204, 404, 404, 404), 404),
((204, 404, 404, 503), 404),
((204, 404, 503, 503), 503),
((204, 503, 503, 503), 503),
((404, 404, 404, 404), 404),
((404, 404, 404, 503), 404),
((404, 404, 503, 503), 404),
((404, 503, 503, 503), 503),
((503, 503, 503, 503), 503)
]
self._assert_responses('POST', POST_TEST_CASES)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestGetAccountInfo(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(
None,
account_ring=FakeRing(), container_ring=FakeRing())
def test_get_deleted_account_410(self):
resp_headers = {'x-account-status': 'deleted',
'x-backend-timestamp': '123.456'}
req = Request.blank('/v1/a')
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(404, headers=resp_headers)):
info = get_account_info(req.environ, self.app)
self.assertEqual(410, info.get('status'))
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/proxy/controllers/test_account.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
import os
from argparse import Namespace
import itertools
import json
from collections import defaultdict
import unittest
import mock
import six
from swift.proxy import server as proxy_server
from swift.proxy.controllers.base import headers_to_container_info, \
headers_to_account_info, headers_to_object_info, get_container_info, \
get_cache_key, get_account_info, get_info, get_object_info, \
Controller, GetOrHeadHandler, bytes_to_skip, clear_info_cache, \
set_info_cache, NodeIter, headers_from_container_info, \
record_cache_op_metrics, GetterSource
from swift.common.swob import Request, HTTPException, RESPONSE_REASONS, \
bytes_to_wsgi, wsgi_to_str
from swift.common import exceptions
from swift.common.utils import split_path, ShardRange, Timestamp, \
GreenthreadSafeIterator, GreenAsyncPile
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.http import is_success
from swift.common.storage_policy import StoragePolicy, StoragePolicyCollection
from test.debug_logger import debug_logger
from test.unit import (
fake_http_connect, FakeRing, FakeMemcache, PatchPolicies,
make_timestamp_iter, mocked_http_conn, patch_policies, FakeSource,
StubResponse)
from swift.common.request_helpers import (
get_sys_meta_prefix, get_object_transient_sysmeta
)
class FakeResponse(object):
base_headers = {}
def __init__(self, status_int=200, headers=None, body=b''):
self.status_int = status_int
self._headers = headers or {}
self.body = body
@property
def headers(self):
if is_success(self.status_int):
self._headers.update(self.base_headers)
return self._headers
class AccountResponse(FakeResponse):
base_headers = {
'x-account-container-count': 333,
'x-account-object-count': 1000,
'x-account-bytes-used': 6666,
}
class ContainerResponse(FakeResponse):
base_headers = {
'x-container-object-count': 1000,
'x-container-bytes-used': 6666,
'x-versions-location': bytes_to_wsgi(
u'\U0001F334'.encode('utf8')),
}
class ObjectResponse(FakeResponse):
base_headers = {
'content-length': 5555,
'content-type': 'text/plain'
}
class DynamicResponseFactory(object):
def __init__(self, *statuses):
if statuses:
self.statuses = iter(statuses)
else:
self.statuses = itertools.repeat(200)
self.stats = defaultdict(int)
response_type = {
'obj': ObjectResponse,
'container': ContainerResponse,
'account': AccountResponse,
}
def _get_response(self, type_):
self.stats[type_] += 1
class_ = self.response_type[type_]
return class_(next(self.statuses))
def get_response(self, environ):
(version, account, container, obj) = split_path(
environ['PATH_INFO'], 2, 4, True)
if obj:
resp = self._get_response('obj')
elif container:
resp = self._get_response('container')
else:
resp = self._get_response('account')
resp.account = account
resp.container = container
resp.obj = obj
return resp
class ZeroCacheAccountResponse(FakeResponse):
base_headers = {
'X-Backend-Recheck-Account-Existence': '0',
'x-account-container-count': 333,
'x-account-object-count': 1000,
'x-account-bytes-used': 6666,
}
class ZeroCacheContainerResponse(FakeResponse):
base_headers = {
'X-Backend-Recheck-Container-Existence': '0',
'x-container-object-count': 1000,
'x-container-bytes-used': 6666,
}
class ZeroCacheDynamicResponseFactory(DynamicResponseFactory):
response_type = {
'obj': ObjectResponse,
'container': ZeroCacheContainerResponse,
'account': ZeroCacheAccountResponse,
}
class FakeApp(object):
recheck_container_existence = 30
container_existence_skip_cache = 0
recheck_account_existence = 30
account_existence_skip_cache = 0
logger = None
def __init__(self, response_factory=None, statuses=None):
self.responses = response_factory or \
DynamicResponseFactory(*statuses or [])
self.captured_envs = []
def __call__(self, environ, start_response):
self.captured_envs.append(environ)
response = self.responses.get_response(environ)
reason = RESPONSE_REASONS[response.status_int][0]
start_response('%d %s' % (response.status_int, reason),
[(k, v) for k, v in response.headers.items()])
return iter(response.body)
class FakeCache(FakeMemcache):
def __init__(self, stub=None, **pre_cached):
super(FakeCache, self).__init__()
if pre_cached:
self.store.update(pre_cached)
# Fake a json roundtrip
self.stub = json.loads(json.dumps(stub))
def get(self, key):
return self.stub or self.store.get(key)
class BaseTest(unittest.TestCase):
def setUp(self):
self.logger = debug_logger()
self.cache = FakeCache()
self.conf = {}
self.account_ring = FakeRing()
self.container_ring = FakeRing()
self.app = proxy_server.Application(self.conf,
logger=self.logger,
account_ring=self.account_ring,
container_ring=self.container_ring)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestFuncs(BaseTest):
def test_get_info_zero_recheck(self):
mock_cache = mock.Mock()
mock_cache.get.return_value = None
app = FakeApp(ZeroCacheDynamicResponseFactory())
env = {'swift.cache': mock_cache}
info_a = get_info(app, env, 'a')
# Check that you got proper info
self.assertEqual(info_a['status'], 200)
self.assertEqual(info_a['bytes'], 6666)
self.assertEqual(info_a['total_object_count'], 1000)
self.assertEqual(info_a['container_count'], 333)
# Make sure the env cache is set
exp_cached_info_a = {
k: str(v) if k in (
'bytes', 'container_count', 'total_object_count') else v
for k, v in info_a.items()}
self.assertEqual(env['swift.infocache'].get('account/a'),
exp_cached_info_a)
# Make sure the app was called
self.assertEqual(app.responses.stats['account'], 1)
self.assertEqual(app.responses.stats['container'], 0)
# Make sure memcache was called
self.assertEqual(mock_cache.mock_calls, [
mock.call.get('account/a'),
mock.call.set('account/a', exp_cached_info_a, time=0),
])
mock_cache.reset_mock()
info_c = get_info(app, env, 'a', 'c')
# Check that you got proper info
self.assertEqual(info_c['status'], 200)
self.assertEqual(info_c['bytes'], 6666)
self.assertEqual(info_c['object_count'], 1000)
# Make sure the env cache is set
exp_cached_info_c = {
k: str(v) if k in (
'bytes', 'object_count', 'storage_policy') else v
for k, v in info_c.items()}
self.assertEqual(env['swift.infocache'].get('account/a'),
exp_cached_info_a)
self.assertEqual(env['swift.infocache'].get('container/a/c'),
exp_cached_info_c)
# Check app call for container, but no new calls for account
self.assertEqual(app.responses.stats['account'], 1)
self.assertEqual(app.responses.stats['container'], 1)
# Make sure container info was cached
self.assertEqual(mock_cache.mock_calls, [
mock.call.get('container/a/c'),
mock.call.set('container/a/c', exp_cached_info_c, time=0),
])
# reset call counts
app = FakeApp(ZeroCacheDynamicResponseFactory())
env = {'swift.cache': mock_cache}
mock_cache.reset_mock()
info_c = get_info(app, env, 'a', 'c')
# Check that you got proper info
self.assertEqual(info_c['status'], 200)
self.assertEqual(info_c['bytes'], 6666)
self.assertEqual(info_c['object_count'], 1000)
# Make sure the env cache is set
self.assertEqual(env['swift.infocache'].get('account/a'),
exp_cached_info_a)
self.assertEqual(env['swift.infocache'].get('container/a/c'),
exp_cached_info_c)
# check app calls both account and container
self.assertEqual(app.responses.stats['account'], 1)
self.assertEqual(app.responses.stats['container'], 1)
# Make sure account info was cached but container was not
self.assertEqual(mock_cache.mock_calls, [
mock.call.get('container/a/c'),
mock.call.get('account/a'),
mock.call.set('account/a', exp_cached_info_a, time=0),
mock.call.set('container/a/c', exp_cached_info_c, time=0),
])
def test_get_info(self):
app = FakeApp()
        # Do a non-cached call to account
env = {}
info_a = get_info(app, env, 'a')
# Check that you got proper info
self.assertEqual(info_a['status'], 200)
self.assertEqual(info_a['bytes'], 6666)
self.assertEqual(info_a['total_object_count'], 1000)
# Make sure the app was called
self.assertEqual(app.responses.stats['account'], 1)
# Make sure the return value matches get_account_info
account_info = get_account_info({'PATH_INFO': '/v1/a'}, app)
self.assertEqual(info_a, account_info)
        # Do an env-cached call to account
app.responses.stats['account'] = 0
app.responses.stats['container'] = 0
info_a = get_info(app, env, 'a')
# Check that you got proper info
self.assertEqual(info_a['status'], 200)
self.assertEqual(info_a['bytes'], 6666)
self.assertEqual(info_a['total_object_count'], 1000)
# Make sure the app was NOT called AGAIN
self.assertEqual(app.responses.stats['account'], 0)
        # This time do an env-cached call to account and a non-cached call
        # to the container
app.responses.stats['account'] = 0
app.responses.stats['container'] = 0
info_c = get_info(app, env, 'a', 'c')
# Check that you got proper info
self.assertEqual(info_c['status'], 200)
self.assertEqual(info_c['bytes'], 6666)
self.assertEqual(info_c['object_count'], 1000)
# Make sure the app was called for container but not account
self.assertEqual(app.responses.stats['account'], 0)
self.assertEqual(app.responses.stats['container'], 1)
# This time do a non-cached call to account then non-cached to
# container
app.responses.stats['account'] = 0
app.responses.stats['container'] = 0
app = FakeApp()
env = {} # abandon previous call to env
info_c = get_info(app, env, 'a', 'c')
# Check that you got proper info
self.assertEqual(info_c['status'], 200)
self.assertEqual(info_c['bytes'], 6666)
self.assertEqual(info_c['object_count'], 1000)
# check app calls both account and container
self.assertEqual(app.responses.stats['account'], 1)
self.assertEqual(app.responses.stats['container'], 1)
# This time do an env-cached call to container while account is not
# cached
app.responses.stats['account'] = 0
app.responses.stats['container'] = 0
info_c = get_info(app, env, 'a', 'c')
# Check that you got proper info
        self.assertEqual(info_c['status'], 200)
self.assertEqual(info_c['bytes'], 6666)
self.assertEqual(info_c['object_count'], 1000)
# no additional calls were made
self.assertEqual(app.responses.stats['account'], 0)
self.assertEqual(app.responses.stats['container'], 0)
def test_get_container_info_swift_source(self):
app = FakeApp()
req = Request.blank("/v1/a/c", environ={'swift.cache': FakeCache()})
get_container_info(req.environ, app, swift_source='MC')
self.assertEqual([e['swift.source'] for e in app.captured_envs],
['MC', 'MC'])
def test_get_container_info_in_pipeline(self):
final_app = FakeApp()
def factory(app):
def wsgi_filter(env, start_response):
# lots of middlewares get info...
if env['PATH_INFO'].count('/') > 2:
get_container_info(env, app)
else:
get_account_info(env, app)
# ...then decide to no-op based on the result
return app(env, start_response)
# Note that we have to do some book-keeping in tests to mimic what
# would be done in swift.common.wsgi.load_app
wsgi_filter._pipeline_final_app = final_app
wsgi_filter._pipeline_request_logging_app = final_app
return wsgi_filter
# build up a pipeline
filtered_app = factory(factory(factory(final_app)))
req = Request.blank("/v1/a/c/o", environ={'swift.cache': FakeCache()})
req.get_response(filtered_app)
self.assertEqual([e['PATH_INFO'] for e in final_app.captured_envs],
['/v1/a', '/v1/a/c', '/v1/a/c/o'])
def test_get_account_info_uses_logging_app(self):
def factory(app, func=None):
calls = []
def wsgi_filter(env, start_response):
calls.append(env)
if func:
func(env, app)
return app(env, start_response)
return wsgi_filter, calls
# build up a pipeline, pretend there is a proxy_logging middleware
final_app = FakeApp()
logging_app, logging_app_calls = factory(final_app)
filtered_app, filtered_app_calls = factory(logging_app,
func=get_account_info)
# mimic what would be done in swift.common.wsgi.load_app
for app in (filtered_app, logging_app):
app._pipeline_final_app = final_app
app._pipeline_request_logging_app = logging_app
req = Request.blank("/v1/a/c/o", environ={'swift.cache': FakeCache()})
req.get_response(filtered_app)
self.assertEqual([e['PATH_INFO'] for e in final_app.captured_envs],
['/v1/a', '/v1/a/c/o'])
self.assertEqual([e['PATH_INFO'] for e in logging_app_calls],
['/v1/a', '/v1/a/c/o'])
self.assertEqual([e['PATH_INFO'] for e in filtered_app_calls],
['/v1/a/c/o'])
def test_get_container_info_uses_logging_app(self):
def factory(app, func=None):
calls = []
def wsgi_filter(env, start_response):
calls.append(env)
if func:
func(env, app)
return app(env, start_response)
return wsgi_filter, calls
# build up a pipeline, pretend there is a proxy_logging middleware
final_app = FakeApp()
logging_app, logging_app_calls = factory(final_app)
filtered_app, filtered_app_calls = factory(logging_app,
func=get_container_info)
# mimic what would be done in swift.common.wsgi.load_app
for app in (filtered_app, logging_app):
app._pipeline_final_app = final_app
app._pipeline_request_logging_app = logging_app
req = Request.blank("/v1/a/c/o", environ={'swift.cache': FakeCache()})
req.get_response(filtered_app)
self.assertEqual([e['PATH_INFO'] for e in final_app.captured_envs],
['/v1/a', '/v1/a/c', '/v1/a/c/o'])
self.assertEqual([e['PATH_INFO'] for e in logging_app_calls],
['/v1/a', '/v1/a/c', '/v1/a/c/o'])
self.assertEqual([e['PATH_INFO'] for e in filtered_app_calls],
['/v1/a/c/o'])
def test_get_object_info_swift_source(self):
app = FakeApp()
req = Request.blank("/v1/a/c/o",
environ={'swift.cache': FakeCache()})
get_object_info(req.environ, app, swift_source='LU')
self.assertEqual([e['swift.source'] for e in app.captured_envs],
['LU'])
def test_get_container_info_no_cache(self):
req = Request.blank("/v1/AUTH_account/cont",
environ={'swift.cache': FakeCache({})})
resp = get_container_info(req.environ, FakeApp())
self.assertEqual(resp['storage_policy'], 0)
self.assertEqual(resp['bytes'], 6666)
self.assertEqual(resp['object_count'], 1000)
expected = u'\U0001F334'
if six.PY2:
expected = expected.encode('utf8')
self.assertEqual(resp['versions'], expected)
def test_get_container_info_no_account(self):
app = FakeApp(statuses=[404, 200])
req = Request.blank("/v1/AUTH_does_not_exist/cont")
info = get_container_info(req.environ, app)
self.assertEqual(info['status'], 0)
def test_get_container_info_no_container_gets_cached(self):
fake_cache = FakeCache({})
app = FakeApp(statuses=[200, 404])
req = Request.blank("/v1/AUTH_account/does_not_exist",
environ={'swift.cache': fake_cache})
info = get_container_info(req.environ, app)
self.assertEqual(info['status'], 404)
key = get_cache_key("AUTH_account", "does_not_exist")
self.assertIn(key, fake_cache.store)
self.assertEqual(fake_cache.store[key]['status'], 404)
def test_get_container_info_bad_path(self):
fake_cache = FakeCache({})
req = Request.blank("/non-swift/AUTH_account/does_not_exist",
environ={'swift.cache': fake_cache})
info = get_container_info(req.environ, FakeApp(statuses=[400]))
self.assertEqual(info['status'], 0)
# *not* cached
key = get_cache_key("AUTH_account", "does_not_exist")
self.assertNotIn(key, fake_cache.store)
# not even the "account" is cached
key = get_cache_key("AUTH_account")
self.assertNotIn(key, fake_cache.store)
# but if for some reason the account *already was* cached...
fake_cache.store[key] = headers_to_account_info({}, 200)
req = Request.blank("/non-swift/AUTH_account/does_not_exist",
environ={'swift.cache': fake_cache})
info = get_container_info(req.environ, FakeApp(statuses=[400]))
self.assertEqual(info['status'], 0)
# resp *still* not cached
key = get_cache_key("AUTH_account", "does_not_exist")
self.assertNotIn(key, fake_cache.store)
# still nothing, even if the container is already cached, too
fake_cache.store[key] = headers_to_container_info({}, 200)
req = Request.blank("/non-swift/AUTH_account/does_not_exist",
environ={'swift.cache': fake_cache})
info = get_container_info(req.environ, FakeApp(statuses=[400]))
self.assertEqual(info['status'], 0)
def test_get_container_info_no_auto_account(self):
app = FakeApp(statuses=[200])
req = Request.blank("/v1/.system_account/cont")
info = get_container_info(req.environ, app)
self.assertEqual(info['status'], 200)
self.assertEqual(info['bytes'], 6666)
self.assertEqual(info['object_count'], 1000)
def test_get_container_info_cache(self):
cache_stub = {
'status': 404, 'bytes': 3333, 'object_count': 10,
'versions': u"\U0001F4A9",
'meta': {u'some-\N{SNOWMAN}': u'non-ascii meta \U0001F334'}}
req = Request.blank("/v1/account/cont",
environ={'swift.cache': FakeCache(cache_stub)})
resp = get_container_info(req.environ, FakeApp())
self.assertEqual([(k, type(k)) for k in resp],
[(k, str) for k in resp])
self.assertEqual(resp['storage_policy'], 0)
self.assertEqual(resp['bytes'], 3333)
self.assertEqual(resp['object_count'], 10)
self.assertEqual(resp['status'], 404)
expected = u'\U0001F4A9'
if six.PY2:
expected = expected.encode('utf8')
self.assertEqual(resp['versions'], expected)
for subdict in resp.values():
if isinstance(subdict, dict):
self.assertEqual([(k, type(k), v, type(v))
for k, v in subdict.items()],
[(k, str, v, str)
for k, v in subdict.items()])
def test_get_container_info_only_lookup_cache(self):
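        # With cache_only=True no backend request is made: a miss returns
        # zeroed-out defaults and a hit returns whatever memcache holds,
        # with the corresponding cache.miss/cache.hit counter incremented.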
        # no container info is cached in memcache.
req = Request.blank("/v1/AUTH_account/cont",
environ={'swift.cache': FakeCache({})})
resp = get_container_info(
req.environ, self.app, swift_source=None, cache_only=True)
self.assertEqual(resp['storage_policy'], 0)
self.assertEqual(resp['bytes'], 0)
self.assertEqual(resp['object_count'], 0)
        self.assertIsNone(resp['versions'])
self.assertEqual(
[x[0][0] for x in
self.logger.logger.statsd_client.calls['increment']],
['container.info.cache.miss'])
        # container info is cached in memcache.
self.logger.clear()
cache_stub = {
'status': 404, 'bytes': 3333, 'object_count': 10,
'versions': u"\U0001F4A9",
'meta': {u'some-\N{SNOWMAN}': u'non-ascii meta \U0001F334'}}
req = Request.blank("/v1/account/cont",
environ={'swift.cache': FakeCache(cache_stub)})
resp = get_container_info(
req.environ, self.app, swift_source=None, cache_only=True)
self.assertEqual([(k, type(k)) for k in resp],
[(k, str) for k in resp])
self.assertEqual(resp['storage_policy'], 0)
self.assertEqual(resp['bytes'], 3333)
self.assertEqual(resp['object_count'], 10)
self.assertEqual(resp['status'], 404)
expected = u'\U0001F4A9'
if six.PY2:
expected = expected.encode('utf8')
self.assertEqual(resp['versions'], expected)
for subdict in resp.values():
if isinstance(subdict, dict):
self.assertEqual([(k, type(k), v, type(v))
for k, v in subdict.items()],
[(k, str, v, str)
for k, v in subdict.items()])
self.assertEqual(
[x[0][0] for x in
self.logger.logger.statsd_client.calls['increment']],
['container.info.cache.hit'])
def test_get_cache_key(self):
self.assertEqual(get_cache_key("account", "cont"),
'container/account/cont')
self.assertEqual(get_cache_key(b"account", b"cont", b'obj'),
'object/account/cont/obj')
self.assertEqual(get_cache_key(u"account", u"cont", b'obj'),
'object/account/cont/obj')
# Expected result should always be native string
expected = u'container/\N{SNOWMAN}/\U0001F334'
if six.PY2:
expected = expected.encode('utf8')
self.assertEqual(get_cache_key(u"\N{SNOWMAN}", u"\U0001F334"),
expected)
self.assertEqual(get_cache_key(u"\N{SNOWMAN}".encode('utf8'),
u"\U0001F334".encode('utf8')),
expected)
self.assertEqual(get_cache_key("account", "cont", shard="listing"),
'shard-listing-v2/account/cont')
self.assertEqual(get_cache_key("account", "cont", shard="updating"),
'shard-updating-v2/account/cont')
self.assertRaises(ValueError,
get_cache_key, "account", shard="listing")
self.assertRaises(ValueError,
get_cache_key, "account", "cont", "obj",
shard="listing")
def test_get_container_info_env(self):
cache_key = get_cache_key("account", "cont")
req = Request.blank(
"/v1/account/cont",
environ={'swift.infocache': {cache_key: {'bytes': 3867}},
'swift.cache': FakeCache({})})
resp = get_container_info(req.environ, 'xxx')
self.assertEqual(resp['bytes'], 3867)
def test_info_clearing(self):
def check_in_cache(req, cache_key):
self.assertIn(cache_key, req.environ['swift.infocache'])
self.assertIn(cache_key, req.environ['swift.cache'].store)
def check_not_in_cache(req, cache_key):
self.assertNotIn(cache_key, req.environ['swift.infocache'])
self.assertNotIn(cache_key, req.environ['swift.cache'].store)
app = FakeApp(statuses=[200, 200])
acct_cache_key = get_cache_key("account")
cont_cache_key = get_cache_key("account", "cont")
req = Request.blank(
"/v1/account/cont", environ={"swift.cache": FakeCache()})
# populate caches
info = get_container_info(req.environ, app)
self.assertEqual(info['status'], 200)
check_in_cache(req, acct_cache_key)
check_in_cache(req, cont_cache_key)
clear_info_cache(req.environ, 'account', 'cont')
check_in_cache(req, acct_cache_key)
check_not_in_cache(req, cont_cache_key)
# Can also use set_info_cache interface
set_info_cache(req.environ, 'account', None, None)
check_not_in_cache(req, acct_cache_key)
check_not_in_cache(req, cont_cache_key)
# check shard cache-keys
shard_cache_key = get_cache_key('account', 'cont', shard='listing')
shard_data = [{'shard': 'ranges'}]
req.environ['swift.infocache'][shard_cache_key] = shard_data
req.environ['swift.cache'].set(shard_cache_key, shard_data, time=600)
check_in_cache(req, shard_cache_key)
clear_info_cache(req.environ, 'account', 'cont',
shard='listing')
check_not_in_cache(req, shard_cache_key)
def test_record_cache_op_metrics(self):
record_cache_op_metrics(
self.logger, 'shard_listing', 'infocache_hit')
self.assertEqual(
self.logger.statsd_client.get_increment_counts().get(
'shard_listing.infocache.hit'),
1)
record_cache_op_metrics(
self.logger, 'shard_listing', 'hit')
self.assertEqual(
self.logger.statsd_client.get_increment_counts().get(
'shard_listing.cache.hit'),
1)
resp = FakeResponse(status_int=200)
record_cache_op_metrics(
self.logger, 'shard_updating', 'skip', resp)
self.assertEqual(
self.logger.statsd_client.get_increment_counts().get(
'shard_updating.cache.skip.200'),
1)
resp = FakeResponse(status_int=503)
record_cache_op_metrics(
self.logger, 'shard_updating', 'disabled', resp)
self.assertEqual(
self.logger.statsd_client.get_increment_counts().get(
'shard_updating.cache.disabled.503'),
1)
# test a cache miss call without response, expect no metric recorded.
self.app.logger = mock.Mock()
record_cache_op_metrics(
self.logger, 'shard_updating', 'miss')
self.app.logger.increment.assert_not_called()
def test_get_account_info_swift_source(self):
app = FakeApp()
req = Request.blank("/v1/a", environ={'swift.cache': FakeCache()})
get_account_info(req.environ, app, swift_source='MC')
self.assertEqual([e['swift.source'] for e in app.captured_envs],
['MC'])
def test_get_account_info_swift_owner(self):
app = FakeApp()
req = Request.blank("/v1/a", environ={'swift.cache': FakeCache()})
get_account_info(req.environ, app)
self.assertEqual([e['swift_owner'] for e in app.captured_envs],
[True])
def test_get_account_info_infocache(self):
app = FakeApp()
ic = {}
req = Request.blank("/v1/a", environ={'swift.cache': FakeCache(),
'swift.infocache': ic})
get_account_info(req.environ, app)
got_infocaches = [e['swift.infocache'] for e in app.captured_envs]
self.assertEqual(1, len(got_infocaches))
self.assertIs(ic, got_infocaches[0])
def test_get_account_info_no_cache(self):
app = FakeApp()
req = Request.blank("/v1/AUTH_account",
environ={'swift.cache': FakeCache({})})
resp = get_account_info(req.environ, app)
self.assertEqual(resp['bytes'], 6666)
self.assertEqual(resp['total_object_count'], 1000)
def test_get_account_info_cache(self):
# Works with fake apps that return ints in the headers
cached = {'status': 404,
'bytes': 3333,
'total_object_count': 10}
req = Request.blank("/v1/account/cont",
environ={'swift.cache': FakeCache(cached)})
resp = get_account_info(req.environ, FakeApp())
self.assertEqual(resp['bytes'], 3333)
self.assertEqual(resp['total_object_count'], 10)
self.assertEqual(resp['status'], 404)
# Works with strings too, like you get when parsing HTTP headers
# that came in through a socket from the account server
cached = {'status': 404,
'bytes': '3333',
'container_count': '234',
'total_object_count': '10',
'meta': {}}
req = Request.blank("/v1/account/cont",
environ={'swift.cache': FakeCache(cached)})
resp = get_account_info(req.environ, FakeApp())
self.assertEqual(resp['status'], 404)
self.assertEqual(resp['bytes'], 3333)
self.assertEqual(resp['container_count'], 234)
self.assertEqual(resp['meta'], {})
self.assertEqual(resp['total_object_count'], 10)
def test_get_account_info_env(self):
cache_key = get_cache_key("account")
req = Request.blank(
"/v1/account",
environ={'swift.infocache': {cache_key: {'bytes': 3867}},
'swift.cache': FakeCache({})})
resp = get_account_info(req.environ, 'xxx')
self.assertEqual(resp['bytes'], 3867)
def test_get_account_info_bad_path(self):
fake_cache = FakeCache({})
req = Request.blank("/non-swift/AUTH_account",
environ={'swift.cache': fake_cache})
info = get_account_info(req.environ, FakeApp(statuses=[400]))
self.assertEqual(info['status'], 0)
# *not* cached
key = get_cache_key("AUTH_account")
self.assertNotIn(key, fake_cache.store)
# but if for some reason the account *already was* cached...
fake_cache.store[key] = headers_to_account_info({}, 200)
req = Request.blank("/non-swift/AUTH_account/does_not_exist",
environ={'swift.cache': fake_cache})
info = get_account_info(req.environ, FakeApp(statuses=[400]))
self.assertEqual(info['status'], 0)
def test_get_object_info_env(self):
cached = {'status': 200,
'length': 3333,
'type': 'application/json',
'meta': {}}
cache_key = get_cache_key("account", "cont", "obj")
req = Request.blank(
"/v1/account/cont/obj",
environ={'swift.infocache': {cache_key: cached},
'swift.cache': FakeCache({})})
resp = get_object_info(req.environ, 'xxx')
self.assertEqual(resp['length'], 3333)
self.assertEqual(resp['type'], 'application/json')
def test_get_object_info_no_env(self):
app = FakeApp()
req = Request.blank("/v1/account/cont/obj",
environ={'swift.cache': FakeCache({})})
resp = get_object_info(req.environ, app)
self.assertEqual(app.responses.stats['account'], 0)
self.assertEqual(app.responses.stats['container'], 0)
self.assertEqual(app.responses.stats['obj'], 1)
self.assertEqual(resp['length'], 5555)
self.assertEqual(resp['type'], 'text/plain')
def test_options(self):
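        # A CORS preflight is allowed through when the request's Origin is
        # in the proxy's cors_allow_origin list.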
base = Controller(self.app)
base.account_name = 'a'
base.container_name = 'c'
origin = 'http://m.com'
self.app.cors_allow_origin = [origin]
req = Request.blank('/v1/a/c/o',
environ={'swift.cache': FakeCache()},
headers={'Origin': origin,
'Access-Control-Request-Method': 'GET'})
with mock.patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
resp = base.OPTIONS(req)
self.assertEqual(resp.status_int, 200)
def test_options_with_null_allow_origin(self):
base = Controller(self.app)
base.account_name = 'a'
base.container_name = 'c'
def my_container_info(*args):
return {
'cors': {
'allow_origin': '*',
}
}
base.container_info = my_container_info
req = Request.blank('/v1/a/c/o',
environ={'swift.cache': FakeCache()},
headers={'Origin': '*',
'Access-Control-Request-Method': 'GET'})
with mock.patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
resp = base.OPTIONS(req)
self.assertEqual(resp.status_int, 200)
def test_options_unauthorized(self):
base = Controller(self.app)
base.account_name = 'a'
base.container_name = 'c'
self.app.cors_allow_origin = ['http://NOT_IT']
req = Request.blank('/v1/a/c/o',
environ={'swift.cache': FakeCache()},
headers={'Origin': 'http://m.com',
'Access-Control-Request-Method': 'GET'})
with mock.patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
resp = base.OPTIONS(req)
self.assertEqual(resp.status_int, 401)
def test_headers_to_container_info_missing(self):
resp = headers_to_container_info({}, 404)
self.assertEqual(resp['status'], 404)
self.assertIsNone(resp['read_acl'])
self.assertIsNone(resp['write_acl'])
self.assertIsNone(resp['sync_key'])
self.assertIsNone(resp['sync_to'])
def test_headers_to_container_info_meta(self):
headers = {'X-Container-Meta-Whatevs': 14,
'x-container-meta-somethingelse': 0}
resp = headers_to_container_info(headers.items(), 200)
self.assertEqual(len(resp['meta']), 2)
self.assertEqual(resp['meta']['whatevs'], 14)
self.assertEqual(resp['meta']['somethingelse'], 0)
def test_headers_to_container_info_sys_meta(self):
prefix = get_sys_meta_prefix('container')
headers = {'%sWhatevs' % prefix: 14,
'%ssomethingelse' % prefix: 0}
resp = headers_to_container_info(headers.items(), 200)
self.assertEqual(len(resp['sysmeta']), 2)
self.assertEqual(resp['sysmeta']['whatevs'], 14)
self.assertEqual(resp['sysmeta']['somethingelse'], 0)
def test_headers_to_container_info_values(self):
headers = {
'x-container-read': 'readvalue',
'x-container-write': 'writevalue',
'x-container-sync-key': 'keyvalue',
'x-container-sync-to': '//r/c/a/c',
'x-container-meta-access-control-allow-origin': 'here',
}
resp = headers_to_container_info(headers.items(), 200)
self.assertEqual(resp['read_acl'], 'readvalue')
self.assertEqual(resp['write_acl'], 'writevalue')
self.assertEqual(resp['sync_key'], 'keyvalue')
self.assertEqual(resp['sync_to'], '//r/c/a/c')
self.assertEqual(resp['cors']['allow_origin'], 'here')
headers['x-unused-header'] = 'blahblahblah'
self.assertEqual(
resp,
headers_to_container_info(headers.items(), 200))
def test_headers_from_container_info(self):
self.assertIsNone(headers_from_container_info(None))
self.assertIsNone(headers_from_container_info({}))
meta = {'fruit': 'cake'}
sysmeta = {'green': 'land'}
info = {
'status': 200,
'read_acl': 'my-read-acl',
'write_acl': 'my-write-acl',
'sync_to': 'my-sync-to',
'sync_key': 'my-sync-key',
'object_count': 99,
'bytes': 999,
'versions': 'my-versions',
'storage_policy': '0',
'cors': {
'allow_origin': 'my-cors-origin',
'expose_headers': 'my-cors-hdrs',
'max_age': 'my-cors-age'},
'created_at': '123.456_12',
'put_timestamp': '234.567_34',
'delete_timestamp': '345_67',
'status_changed_at': '246.8_9',
'meta': meta,
'sysmeta': sysmeta,
'sharding_state': 'unsharded'
}
res = headers_from_container_info(info)
expected = {
'X-Backend-Delete-Timestamp': '345_67',
'X-Backend-Put-Timestamp': '234.567_34',
'X-Backend-Sharding-State': 'unsharded',
'X-Backend-Status-Changed-At': '246.8_9',
'X-Backend-Storage-Policy-Index': '0',
'X-Backend-Timestamp': '123.456_12',
'X-Container-Bytes-Used': '999',
'X-Container-Meta-Fruit': 'cake',
'X-Container-Object-Count': '99',
'X-Container-Read': 'my-read-acl',
'X-Container-Sync-Key': 'my-sync-key',
'X-Container-Sync-To': 'my-sync-to',
'X-Container-Sysmeta-Green': 'land',
'X-Container-Write': 'my-write-acl',
'X-Put-Timestamp': '0000000234.56700',
'X-Storage-Policy': 'zero',
'X-Timestamp': '0000000123.45600',
'X-Versions-Location': 'my-versions',
'X-Container-Meta-Access-Control-Allow-Origin': 'my-cors-origin',
'X-Container-Meta-Access-Control-Expose-Headers': 'my-cors-hdrs',
'X-Container-Meta-Access-Control-Max-Age': 'my-cors-age',
}
self.assertEqual(expected, res)
for required in (
'created_at', 'put_timestamp', 'delete_timestamp',
'status_changed_at', 'storage_policy', 'object_count', 'bytes',
'sharding_state'):
incomplete_info = dict(info)
incomplete_info.pop(required)
self.assertIsNone(headers_from_container_info(incomplete_info))
for hdr, optional in (
('X-Container-Read', 'read_acl'),
('X-Container-Write', 'write_acl'),
('X-Container-Sync-Key', 'sync_key'),
('X-Container-Sync-To', 'sync_to'),
('X-Versions-Location', 'versions'),
('X-Container-Meta-Fruit', 'meta'),
('X-Container-Sysmeta-Green', 'sysmeta'),
):
incomplete_info = dict(info)
incomplete_info.pop(optional)
incomplete_expected = dict(expected)
incomplete_expected.pop(hdr)
self.assertEqual(incomplete_expected,
headers_from_container_info(incomplete_info))
for hdr, optional in (
('Access-Control-Allow-Origin', 'allow_origin'),
('Access-Control-Expose-Headers', 'expose_headers'),
('Access-Control-Max-Age', 'max_age'),
):
incomplete_info = dict(info)
incomplete_cors = dict(info['cors'])
incomplete_cors.pop(optional)
incomplete_info['cors'] = incomplete_cors
incomplete_expected = dict(expected)
incomplete_expected.pop('X-Container-Meta-' + hdr)
self.assertEqual(incomplete_expected,
headers_from_container_info(incomplete_info))
def test_container_info_preserves_storage_policy(self):
base = Controller(self.app)
base.account_name = 'a'
base.container_name = 'c'
fake_info = {'status': 404, 'storage_policy': 1}
with mock.patch('swift.proxy.controllers.base.'
'get_container_info', return_value=fake_info):
container_info = \
base.container_info(base.account_name, base.container_name,
Request.blank('/'))
self.assertEqual(container_info['status'], 404)
self.assertEqual(container_info['storage_policy'], 1)
        self.assertIsNone(container_info['partition'])
        self.assertIsNone(container_info['nodes'])
def test_container_info_needs_req(self):
base = Controller(self.app)
base.account_name = 'a'
base.container_name = 'c'
with mock.patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
container_info = \
base.container_info(base.account_name,
base.container_name, Request.blank('/'))
self.assertEqual(container_info['status'], 503)
def test_headers_to_account_info_missing(self):
resp = headers_to_account_info({}, 404)
self.assertEqual(resp['status'], 404)
self.assertIsNone(resp['bytes'])
self.assertIsNone(resp['container_count'])
def test_headers_to_account_info_meta(self):
headers = {'X-Account-Meta-Whatevs': 14,
'x-account-meta-somethingelse': 0}
resp = headers_to_account_info(headers.items(), 200)
self.assertEqual(len(resp['meta']), 2)
self.assertEqual(resp['meta']['whatevs'], 14)
self.assertEqual(resp['meta']['somethingelse'], 0)
def test_headers_to_account_info_sys_meta(self):
prefix = get_sys_meta_prefix('account')
headers = {'%sWhatevs' % prefix: 14,
'%ssomethingelse' % prefix: 0}
resp = headers_to_account_info(headers.items(), 200)
self.assertEqual(len(resp['sysmeta']), 2)
self.assertEqual(resp['sysmeta']['whatevs'], 14)
self.assertEqual(resp['sysmeta']['somethingelse'], 0)
def test_headers_to_account_info_values(self):
headers = {
'x-account-object-count': '10',
'x-account-container-count': '20',
}
resp = headers_to_account_info(headers.items(), 200)
self.assertEqual(resp['total_object_count'], '10')
self.assertEqual(resp['container_count'], '20')
headers['x-unused-header'] = 'blahblahblah'
self.assertEqual(
resp,
headers_to_account_info(headers.items(), 200))
def test_headers_to_account_info_storage_policies(self):
headers = {
'x-account-storage-policy-zero-object-count': '13',
'x-account-storage-policy-zero-container-count': '120',
'x-account-storage-policy-zero-bytes-used': '1002',
'x-account-storage-policy-one-object-count': '10',
'x-account-storage-policy-one-container-count': '20',
}
spc = StoragePolicyCollection([StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False)])
with PatchPolicies(spc):
resp = headers_to_account_info(headers.items(), 200)
self.assertEqual(resp['storage_policies'], {
0: {'object_count': 13,
'container_count': 120,
'bytes': 1002},
1: {'object_count': 10,
'container_count': 20,
'bytes': 0},
})
def test_headers_to_object_info_missing(self):
resp = headers_to_object_info({}, 404)
self.assertEqual(resp['status'], 404)
self.assertIsNone(resp['length'])
self.assertIsNone(resp['etag'])
def test_headers_to_object_info_meta(self):
headers = {'X-Object-Meta-Whatevs': 14,
'x-object-meta-somethingelse': 0}
resp = headers_to_object_info(headers.items(), 200)
self.assertEqual(len(resp['meta']), 2)
self.assertEqual(resp['meta']['whatevs'], 14)
self.assertEqual(resp['meta']['somethingelse'], 0)
def test_headers_to_object_info_sys_meta(self):
prefix = get_sys_meta_prefix('object')
headers = {'%sWhatevs' % prefix: 14,
'%ssomethingelse' % prefix: 0}
resp = headers_to_object_info(headers.items(), 200)
self.assertEqual(len(resp['sysmeta']), 2)
self.assertEqual(resp['sysmeta']['whatevs'], 14)
self.assertEqual(resp['sysmeta']['somethingelse'], 0)
def test_headers_to_object_info_transient_sysmeta(self):
headers = {get_object_transient_sysmeta('Whatevs'): 14,
get_object_transient_sysmeta('somethingelse'): 0}
resp = headers_to_object_info(headers.items(), 200)
self.assertEqual(len(resp['transient_sysmeta']), 2)
self.assertEqual(resp['transient_sysmeta']['whatevs'], 14)
self.assertEqual(resp['transient_sysmeta']['somethingelse'], 0)
def test_headers_to_object_info_values(self):
headers = {
'content-length': '1024',
'content-type': 'application/json',
}
resp = headers_to_object_info(headers.items(), 200)
self.assertEqual(resp['length'], '1024')
self.assertEqual(resp['type'], 'application/json')
headers['x-unused-header'] = 'blahblahblah'
self.assertEqual(
resp,
headers_to_object_info(headers.items(), 200))
def test_base_have_quorum(self):
base = Controller(self.app)
# just throw a bunch of test cases at it
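        # have_quorum(statuses, node_count) reports whether the responses
        # received so far already agree closely enough (by status class) to
        # settle the final result without waiting on the remaining backends.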
self.assertFalse(base.have_quorum([201, 404], 3))
self.assertTrue(base.have_quorum([201, 201], 4))
self.assertFalse(base.have_quorum([201], 4))
self.assertTrue(base.have_quorum([201, 201, 404, 404], 4))
self.assertFalse(base.have_quorum([201, 302, 418, 503], 4))
self.assertTrue(base.have_quorum([201, 503, 503, 201], 4))
self.assertTrue(base.have_quorum([201, 201], 3))
self.assertTrue(base.have_quorum([404, 404], 3))
self.assertTrue(base.have_quorum([201, 201], 2))
self.assertTrue(base.have_quorum([201, 404], 2))
self.assertTrue(base.have_quorum([404, 404], 2))
self.assertTrue(base.have_quorum([201, 404, 201, 201], 4))
def test_best_response_overrides(self):
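        # overrides maps a raw backend status to the status it should count
        # as when picking the best response; per the cases below, overridden
        # statuses alone are not enough to form a quorum.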
base = Controller(self.app)
responses = [
(302, 'Found', '', b'The resource has moved temporarily.'),
(100, 'Continue', '', b''),
(404, 'Not Found', '', b'Custom body'),
]
server_type = "Base DELETE"
req = Request.blank('/v1/a/c/o', method='DELETE')
statuses, reasons, headers, bodies = zip(*responses)
# First test that you can't make a quorum with only overridden
# responses
overrides = {302: 204, 100: 204}
resp = base.best_response(req, statuses, reasons, bodies, server_type,
headers=headers, overrides=overrides)
self.assertEqual(resp.status, '503 Service Unavailable')
# next make a 404 quorum and make sure the last delete (real) 404
# status is the one returned.
overrides = {100: 404}
resp = base.best_response(req, statuses, reasons, bodies, server_type,
headers=headers, overrides=overrides)
self.assertEqual(resp.status, '404 Not Found')
self.assertEqual(resp.body, b'Custom body')
def test_range_fast_forward(self):
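        # fast_forward(n) rewrites the backend Range header to account for
        # the n bytes already delivered; it raises HTTPException when asked
        # to skip past the end of the range and RangeAlreadyComplete once
        # everything requested has been delivered.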
req = Request.blank('/')
handler = GetOrHeadHandler(
self.app, req, None, Namespace(num_primary_nodes=3), None, None,
{})
handler.fast_forward(50)
self.assertEqual(handler.backend_headers['Range'], 'bytes=50-')
handler = GetOrHeadHandler(
self.app, req, None, Namespace(num_primary_nodes=3), None, None,
{'Range': 'bytes=23-50'})
handler.fast_forward(20)
self.assertEqual(handler.backend_headers['Range'], 'bytes=43-50')
self.assertRaises(HTTPException,
handler.fast_forward, 80)
self.assertRaises(exceptions.RangeAlreadyComplete,
handler.fast_forward, 8)
handler = GetOrHeadHandler(
self.app, req, None, Namespace(num_primary_nodes=3), None, None,
{'Range': 'bytes=23-'})
handler.fast_forward(20)
self.assertEqual(handler.backend_headers['Range'], 'bytes=43-')
handler = GetOrHeadHandler(
self.app, req, None, Namespace(num_primary_nodes=3), None, None,
{'Range': 'bytes=-100'})
handler.fast_forward(20)
self.assertEqual(handler.backend_headers['Range'], 'bytes=-80')
self.assertRaises(HTTPException,
handler.fast_forward, 100)
self.assertRaises(exceptions.RangeAlreadyComplete,
handler.fast_forward, 80)
handler = GetOrHeadHandler(
self.app, req, None, Namespace(num_primary_nodes=3), None, None,
{'Range': 'bytes=0-0'})
self.assertRaises(exceptions.RangeAlreadyComplete,
handler.fast_forward, 1)
handler = GetOrHeadHandler(
self.app, req, None, Namespace(num_primary_nodes=3), None, None,
{'Range': 'bytes=23-',
'X-Backend-Ignore-Range-If-Metadata-Present':
'X-Static-Large-Object'})
handler.fast_forward(20)
self.assertEqual(handler.backend_headers['Range'], 'bytes=43-')
self.assertNotIn('X-Backend-Ignore-Range-If-Metadata-Present',
handler.backend_headers)
def test_range_fast_forward_after_data_timeout(self):
req = Request.blank('/')
# We get a 200 and learn that it's a 1000-byte object, but receive 0
# bytes of data, so then we get a new node, fast_forward(0), and
# send out a new request. That new request must be for all 1000
# bytes.
handler = GetOrHeadHandler(
self.app, req, None, Namespace(num_primary_nodes=3), None, None,
{})
handler.learn_size_from_content_range(0, 999, 1000)
handler.fast_forward(0)
self.assertEqual(handler.backend_headers['Range'], 'bytes=0-999')
# Same story as above, but a 1-byte object so we can have our byte
# indices be 0.
handler = GetOrHeadHandler(
self.app, req, None, Namespace(num_primary_nodes=3), None, None,
{})
handler.learn_size_from_content_range(0, 0, 1)
handler.fast_forward(0)
self.assertEqual(handler.backend_headers['Range'], 'bytes=0-0')
# last 100 bytes
handler = GetOrHeadHandler(
self.app, req, None, Namespace(num_primary_nodes=3), None, None,
{'Range': 'bytes=-100'})
handler.learn_size_from_content_range(900, 999, 1000)
handler.fast_forward(0)
self.assertEqual(handler.backend_headers['Range'], 'bytes=900-999')
def test_transfer_headers_with_sysmeta(self):
base = Controller(self.app)
good_hdrs = {'x-base-sysmeta-foo': 'ok',
'X-Base-sysmeta-Bar': 'also ok'}
bad_hdrs = {'x-base-sysmeta-': 'too short'}
hdrs = dict(good_hdrs)
hdrs.update(bad_hdrs)
dst_hdrs = HeaderKeyDict()
base.transfer_headers(hdrs, dst_hdrs)
self.assertEqual(HeaderKeyDict(good_hdrs), dst_hdrs)
def test_generate_request_headers(self):
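        # generate_request_headers builds the headers for a backend request:
        # x-timestamp, x-trans-id, Referer, connection and user-agent are
        # always added, selected x-backend-* headers from the client request
        # are carried over, and transfer=True additionally copies user
        # meta/sysmeta.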
base = Controller(self.app)
src_headers = {'x-remove-base-meta-owner': 'x',
'x-base-meta-size': '151M',
'x-base-sysmeta-mysysmeta': 'myvalue',
'x-Backend-No-Timestamp-Update': 'true',
'X-Backend-Storage-Policy-Index': '3',
'x-backendoftheworld': 'ignored',
'new-owner': 'Kun'}
req = Request.blank('/v1/a/c/o', headers=src_headers)
dst_headers = base.generate_request_headers(req)
expected_headers = {'x-backend-no-timestamp-update': 'true',
'x-backend-storage-policy-index': '3',
'x-timestamp': mock.ANY,
'x-trans-id': '-',
'Referer': 'GET http://localhost/v1/a/c/o',
'connection': 'close',
'user-agent': 'proxy-server %d' % os.getpid()}
for k, v in expected_headers.items():
self.assertIn(k, dst_headers)
self.assertEqual(v, dst_headers[k])
for k, v in expected_headers.items():
dst_headers.pop(k)
self.assertFalse(dst_headers)
# with transfer=True
req = Request.blank('/v1/a/c/o', headers=src_headers)
dst_headers = base.generate_request_headers(req, transfer=True)
expected_headers.update({'x-base-meta-owner': '',
'x-base-meta-size': '151M',
'x-base-sysmeta-mysysmeta': 'myvalue'})
for k, v in expected_headers.items():
self.assertIn(k, dst_headers)
self.assertEqual(v, dst_headers[k])
for k, v in expected_headers.items():
dst_headers.pop(k)
self.assertFalse(dst_headers)
# with additional
req = Request.blank('/v1/a/c/o', headers=src_headers)
dst_headers = base.generate_request_headers(
req, transfer=True,
additional=src_headers)
expected_headers.update({'x-remove-base-meta-owner': 'x',
'x-backendoftheworld': 'ignored',
'new-owner': 'Kun'})
for k, v in expected_headers.items():
self.assertIn(k, dst_headers)
self.assertEqual(v, dst_headers[k])
for k, v in expected_headers.items():
dst_headers.pop(k)
self.assertFalse(dst_headers)
# with additional, verify precedence
req = Request.blank('/v1/a/c/o', headers=src_headers)
dst_headers = base.generate_request_headers(
req, transfer=False,
additional={'X-Backend-Storage-Policy-Index': '2',
'X-Timestamp': '1234.56789'})
expected_headers = {'x-backend-no-timestamp-update': 'true',
'x-backend-storage-policy-index': '2',
'x-timestamp': '1234.56789',
'x-trans-id': '-',
'Referer': 'GET http://localhost/v1/a/c/o',
'connection': 'close',
'user-agent': 'proxy-server %d' % os.getpid()}
for k, v in expected_headers.items():
self.assertIn(k, dst_headers)
self.assertEqual(v, dst_headers[k])
for k, v in expected_headers.items():
dst_headers.pop(k)
self.assertFalse(dst_headers)
def test_generate_request_headers_change_backend_user_agent(self):
base = Controller(self.app)
self.app.backend_user_agent = "swift-flux-capacitor"
src_headers = {'x-remove-base-meta-owner': 'x',
'x-base-meta-size': '151M',
'new-owner': 'Kun'}
req = Request.blank('/v1/a/c/o', headers=src_headers)
dst_headers = base.generate_request_headers(req, transfer=True)
expected_headers = {'x-base-meta-owner': '',
'x-base-meta-size': '151M',
'connection': 'close',
'user-agent': 'swift-flux-capacitor'}
for k, v in expected_headers.items():
self.assertIn(k, dst_headers)
self.assertEqual(v, dst_headers[k])
self.assertNotIn('new-owner', dst_headers)
def test_generate_request_headers_with_sysmeta(self):
base = Controller(self.app)
good_hdrs = {'x-base-sysmeta-foo': 'ok',
'X-Base-sysmeta-Bar': 'also ok'}
bad_hdrs = {'x-base-sysmeta-': 'too short'}
hdrs = dict(good_hdrs)
hdrs.update(bad_hdrs)
req = Request.blank('/v1/a/c/o', headers=hdrs)
dst_headers = base.generate_request_headers(req, transfer=True)
for k, v in good_hdrs.items():
self.assertIn(k.lower(), dst_headers)
self.assertEqual(v, dst_headers[k.lower()])
for k, v in bad_hdrs.items():
self.assertNotIn(k.lower(), dst_headers)
def test_generate_request_headers_with_no_orig_req(self):
base = Controller(self.app)
src_headers = {'x-remove-base-meta-owner': 'x',
'x-base-meta-size': '151M',
'new-owner': 'Kun'}
dst_headers = base.generate_request_headers(None,
additional=src_headers,
transfer=True)
expected_headers = {'x-base-meta-size': '151M',
'connection': 'close'}
for k, v in expected_headers.items():
self.assertIn(k, dst_headers)
self.assertEqual(v, dst_headers[k])
self.assertEqual('', dst_headers['Referer'])
def test_disconnected_logging(self):
self.app.logger = mock.Mock()
req = Request.blank('/v1/a/c/o')
headers = {'content-type': 'text/plain'}
source = FakeSource([], headers=headers, body=b'the cake is a lie')
node = {'ip': '1.2.3.4', 'port': 6200, 'device': 'sda'}
handler = GetOrHeadHandler(
self.app, req, 'Object', Namespace(num_primary_nodes=1), None,
'some-path', {})
def mock_find_source():
handler.source = GetterSource(self.app, source, node)
return True
with mock.patch.object(handler, '_find_source',
mock_find_source):
resp = handler.get_working_response(req)
resp.app_iter.close()
self.app.logger.info.assert_called_once_with(
'Client disconnected on read of %r', 'some-path')
self.app.logger = mock.Mock()
node = {'ip': '1.2.3.4', 'port': 6200, 'device': 'sda'}
handler = GetOrHeadHandler(
self.app, req, 'Object', Namespace(num_primary_nodes=1), None,
None, {})
with mock.patch.object(handler, '_find_source',
mock_find_source):
resp = handler.get_working_response(req)
next(resp.app_iter)
resp.app_iter.close()
self.app.logger.warning.assert_not_called()
def test_bytes_to_skip(self):
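        # bytes_to_skip(record_size, bytes_already_got) returns how many
        # more bytes must be discarded to get back onto a record boundary
        # after resuming mid-record; effectively
        # (record_size - bytes_already_got % record_size) % record_size.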
# if you start at the beginning, skip nothing
self.assertEqual(bytes_to_skip(1024, 0), 0)
# missed the first 10 bytes, so we've got 1014 bytes of partial
# record
self.assertEqual(bytes_to_skip(1024, 10), 1014)
# skipped some whole records first
self.assertEqual(bytes_to_skip(1024, 4106), 1014)
# landed on a record boundary
self.assertEqual(bytes_to_skip(1024, 1024), 0)
self.assertEqual(bytes_to_skip(1024, 2048), 0)
# big numbers
self.assertEqual(bytes_to_skip(2 ** 20, 2 ** 32), 0)
self.assertEqual(bytes_to_skip(2 ** 20, 2 ** 32 + 1), 2 ** 20 - 1)
self.assertEqual(bytes_to_skip(2 ** 20, 2 ** 32 + 2 ** 19), 2 ** 19)
# odd numbers
self.assertEqual(bytes_to_skip(123, 0), 0)
self.assertEqual(bytes_to_skip(123, 23), 100)
self.assertEqual(bytes_to_skip(123, 247), 122)
# prime numbers
self.assertEqual(bytes_to_skip(11, 7), 4)
self.assertEqual(bytes_to_skip(97, 7873823), 55)
def test_get_shard_ranges_for_container_get(self):
ts_iter = make_timestamp_iter()
shard_ranges = [dict(ShardRange(
'.sharded_a/sr%d' % i, next(ts_iter), '%d_lower' % i,
'%d_upper' % i, object_count=i, bytes_used=1024 * i,
meta_timestamp=next(ts_iter)))
for i in range(3)]
base = Controller(self.app)
req = Request.blank('/v1/a/c', method='GET')
resp_headers = {'X-Backend-Record-Type': 'shard'}
with mocked_http_conn(
200, 200,
body_iter=iter([b'', json.dumps(shard_ranges).encode('ascii')]),
headers=resp_headers
) as fake_conn:
actual, resp = base._get_shard_ranges(req, 'a', 'c')
self.assertEqual(200, resp.status_int)
# account info
captured = fake_conn.requests
self.assertEqual('HEAD', captured[0]['method'])
self.assertEqual('a', captured[0]['path'][7:])
# container GET
self.assertEqual('GET', captured[1]['method'])
self.assertEqual('a/c', captured[1]['path'][7:])
self.assertEqual('format=json', captured[1]['qs'])
self.assertEqual(
'shard', captured[1]['headers'].get('X-Backend-Record-Type'))
self.assertEqual(shard_ranges, [dict(pr) for pr in actual])
self.assertFalse(self.app.logger.get_lines_for_level('error'))
def test_get_shard_ranges_for_object_put(self):
ts_iter = make_timestamp_iter()
shard_ranges = [dict(ShardRange(
'.sharded_a/sr%d' % i, next(ts_iter), '%d_lower' % i,
'%d_upper' % i, object_count=i, bytes_used=1024 * i,
meta_timestamp=next(ts_iter)))
for i in range(3)]
base = Controller(self.app)
req = Request.blank('/v1/a/c/o', method='PUT')
resp_headers = {'X-Backend-Record-Type': 'shard'}
with mocked_http_conn(
200, 200,
body_iter=iter([b'',
json.dumps(shard_ranges[1:2]).encode('ascii')]),
headers=resp_headers
) as fake_conn:
actual, resp = base._get_shard_ranges(req, 'a', 'c', '1_test')
self.assertEqual(200, resp.status_int)
# account info
captured = fake_conn.requests
self.assertEqual('HEAD', captured[0]['method'])
self.assertEqual('a', captured[0]['path'][7:])
# container GET
self.assertEqual('GET', captured[1]['method'])
self.assertEqual('a/c', captured[1]['path'][7:])
params = sorted(captured[1]['qs'].split('&'))
self.assertEqual(
['format=json', 'includes=1_test'], params)
self.assertEqual(
'shard', captured[1]['headers'].get('X-Backend-Record-Type'))
self.assertEqual(shard_ranges[1:2], [dict(pr) for pr in actual])
self.assertFalse(self.app.logger.get_lines_for_level('error'))
def test_get_shard_ranges_for_utf8_object_put(self):
ts_iter = make_timestamp_iter()
shard_ranges = [dict(ShardRange(
'.sharded_a/sr%d' % i, next(ts_iter), u'\u1234%d_lower' % i,
u'\u1234%d_upper' % i, object_count=i, bytes_used=1024 * i,
meta_timestamp=next(ts_iter)))
for i in range(3)]
base = Controller(self.app)
req = Request.blank('/v1/a/c/o', method='PUT')
resp_headers = {'X-Backend-Record-Type': 'shard'}
with mocked_http_conn(
200, 200,
body_iter=iter([b'',
json.dumps(shard_ranges[1:2]).encode('ascii')]),
headers=resp_headers
) as fake_conn:
actual, resp = base._get_shard_ranges(
req, 'a', 'c', wsgi_to_str('\xe1\x88\xb41_test'))
self.assertEqual(200, resp.status_int)
# account info
captured = fake_conn.requests
self.assertEqual('HEAD', captured[0]['method'])
self.assertEqual('a', captured[0]['path'][7:])
# container GET
self.assertEqual('GET', captured[1]['method'])
self.assertEqual('a/c', captured[1]['path'][7:])
params = sorted(captured[1]['qs'].split('&'))
self.assertEqual(
['format=json', 'includes=%E1%88%B41_test'], params)
self.assertEqual(
'shard', captured[1]['headers'].get('X-Backend-Record-Type'))
self.assertEqual(shard_ranges[1:2], [dict(pr) for pr in actual])
self.assertFalse(self.app.logger.get_lines_for_level('error'))
def _check_get_shard_ranges_bad_data(self, body):
base = Controller(self.app)
req = Request.blank('/v1/a/c/o', method='PUT')
        # the account HEAD gets an empty body; the container GET returns
        # the supplied (bad) listing body
headers = {'X-Backend-Record-Type': 'shard'}
with mocked_http_conn(200, 200, body_iter=iter([b'', body]),
headers=headers):
actual, resp = base._get_shard_ranges(req, 'a', 'c', '1_test')
self.assertEqual(200, resp.status_int)
self.assertIsNone(actual)
lines = self.app.logger.get_lines_for_level('error')
return lines
def test_get_shard_ranges_empty_body(self):
error_lines = self._check_get_shard_ranges_bad_data(b'')
self.assertIn('Problem with listing response', error_lines[0])
if six.PY2:
self.assertIn('No JSON', error_lines[0])
else:
self.assertIn('JSONDecodeError', error_lines[0])
self.assertFalse(error_lines[1:])
def test_get_shard_ranges_not_a_list(self):
body = json.dumps({}).encode('ascii')
error_lines = self._check_get_shard_ranges_bad_data(body)
self.assertIn('Problem with listing response', error_lines[0])
self.assertIn('not a list', error_lines[0])
self.assertFalse(error_lines[1:])
def test_get_shard_ranges_key_missing(self):
body = json.dumps([{}]).encode('ascii')
error_lines = self._check_get_shard_ranges_bad_data(body)
self.assertIn('Failed to get shard ranges', error_lines[0])
self.assertIn('KeyError', error_lines[0])
self.assertFalse(error_lines[1:])
def test_get_shard_ranges_invalid_shard_range(self):
sr = ShardRange('a/c', Timestamp.now())
bad_sr_data = dict(sr, name='bad_name')
body = json.dumps([bad_sr_data]).encode('ascii')
error_lines = self._check_get_shard_ranges_bad_data(body)
self.assertIn('Failed to get shard ranges', error_lines[0])
self.assertIn('ValueError', error_lines[0])
self.assertFalse(error_lines[1:])
def test_get_shard_ranges_missing_record_type(self):
base = Controller(self.app)
req = Request.blank('/v1/a/c/o', method='PUT')
sr = ShardRange('a/c', Timestamp.now())
body = json.dumps([dict(sr)]).encode('ascii')
with mocked_http_conn(
200, 200, body_iter=iter([b'', body])):
actual, resp = base._get_shard_ranges(req, 'a', 'c', '1_test')
self.assertEqual(200, resp.status_int)
self.assertIsNone(actual)
error_lines = self.app.logger.get_lines_for_level('error')
self.assertIn('Failed to get shard ranges', error_lines[0])
self.assertIn('unexpected record type', error_lines[0])
self.assertIn('/a/c', error_lines[0])
self.assertFalse(error_lines[1:])
def test_get_shard_ranges_wrong_record_type(self):
base = Controller(self.app)
req = Request.blank('/v1/a/c/o', method='PUT')
sr = ShardRange('a/c', Timestamp.now())
body = json.dumps([dict(sr)]).encode('ascii')
headers = {'X-Backend-Record-Type': 'object'}
with mocked_http_conn(
200, 200, body_iter=iter([b'', body]),
headers=headers):
actual, resp = base._get_shard_ranges(req, 'a', 'c', '1_test')
self.assertEqual(200, resp.status_int)
self.assertIsNone(actual)
error_lines = self.app.logger.get_lines_for_level('error')
self.assertIn('Failed to get shard ranges', error_lines[0])
self.assertIn('unexpected record type', error_lines[0])
self.assertIn('/a/c', error_lines[0])
self.assertFalse(error_lines[1:])
def test_get_shard_ranges_request_failed(self):
base = Controller(self.app)
req = Request.blank('/v1/a/c/o', method='PUT')
with mocked_http_conn(200, 404, 404, 404):
actual, resp = base._get_shard_ranges(req, 'a', 'c', '1_test')
self.assertEqual(404, resp.status_int)
self.assertIsNone(actual)
self.assertFalse(self.app.logger.get_lines_for_level('error'))
warning_lines = self.app.logger.get_lines_for_level('warning')
self.assertIn('Failed to get container listing', warning_lines[0])
self.assertIn('/a/c', warning_lines[0])
self.assertFalse(warning_lines[1:])
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestNodeIter(BaseTest):
def test_iter_default_fake_ring(self):
for ring in (self.account_ring, self.container_ring):
self.assertEqual(ring.replica_count, 3.0)
node_iter = NodeIter(self.app, ring, 0, self.logger,
request=Request.blank(''))
self.assertEqual(6, node_iter.nodes_left)
self.assertEqual(3, node_iter.primaries_left)
count = 0
for node in node_iter:
count += 1
self.assertEqual(count, 3)
self.assertEqual(0, node_iter.primaries_left)
# default fake_ring has NO handoffs, so nodes_left is kind of a lie
self.assertEqual(3, node_iter.nodes_left)
def test_iter_with_handoffs(self):
ring = FakeRing(replicas=3, max_more_nodes=20) # handoffs available
policy = StoragePolicy(0, 'zero', object_ring=ring)
node_iter = NodeIter(self.app, policy.object_ring, 0, self.logger,
policy=policy, request=Request.blank(''))
self.assertEqual(6, node_iter.nodes_left)
self.assertEqual(3, node_iter.primaries_left)
primary_indexes = set()
handoff_indexes = []
count = 0
for node in node_iter:
if 'index' in node:
primary_indexes.add(node['index'])
else:
handoff_indexes.append(node['handoff_index'])
count += 1
self.assertEqual(count, 6)
self.assertEqual(0, node_iter.primaries_left)
self.assertEqual(0, node_iter.nodes_left)
self.assertEqual({0, 1, 2}, primary_indexes)
self.assertEqual([0, 1, 2], handoff_indexes)
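    # Taken together, the two tests above pin down NodeIter's basic
    # contract: primary nodes are yielded first (each node dict carries an
    # 'index' key), handoffs follow (carrying 'handoff_index'), and
    # nodes_left starts at 2 * replica_count regardless of how many
    # handoffs the ring can actually supply.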
def test_multi_iteration(self):
ring = FakeRing(replicas=8, max_more_nodes=20)
policy = StoragePolicy(0, 'ec', object_ring=ring)
# sanity
node_iter = NodeIter(self.app, policy.object_ring, 0, self.logger,
policy=policy, request=Request.blank(''))
self.assertEqual(16, len([n for n in node_iter]))
node_iter = NodeIter(self.app, policy.object_ring, 0, self.logger,
policy=policy, request=Request.blank(''))
self.assertEqual(16, node_iter.nodes_left)
self.assertEqual(8, node_iter.primaries_left)
pile = GreenAsyncPile(5)
def eat_node(node_iter):
return next(node_iter)
safe_iter = GreenthreadSafeIterator(node_iter)
for i in range(5):
pile.spawn(eat_node, safe_iter)
nodes = []
for node in pile:
nodes.append(node)
primary_indexes = {n['index'] for n in nodes}
self.assertEqual(5, len(primary_indexes))
self.assertEqual(3, node_iter.primaries_left)
# it's problematic we don't decrement nodes_left until we resume
self.assertEqual(12, node_iter.nodes_left)
for node in node_iter:
nodes.append(node)
self.assertEqual(17, len(nodes))
def test_annotate_node_with_use_replication(self):
ring = FakeRing(replicas=8, max_more_nodes=20)
policy = StoragePolicy(0, 'ec', object_ring=ring)
node_iter = NodeIter(self.app, policy.object_ring, 0, self.logger,
policy=policy, request=Request.blank(''))
for node in node_iter:
self.assertIn('use_replication', node)
self.assertFalse(node['use_replication'])
req = Request.blank('a/c')
node_iter = NodeIter(self.app, policy.object_ring, 0, self.logger,
policy=policy, request=req)
for node in node_iter:
self.assertIn('use_replication', node)
self.assertFalse(node['use_replication'])
req = Request.blank(
'a/c', headers={'x-backend-use-replication-network': 'False'})
node_iter = NodeIter(self.app, policy.object_ring, 0, self.logger,
policy=policy, request=req)
for node in node_iter:
self.assertIn('use_replication', node)
self.assertFalse(node['use_replication'])
req = Request.blank(
'a/c', headers={'x-backend-use-replication-network': 'yes'})
node_iter = NodeIter(self.app, policy.object_ring, 0, self.logger,
policy=policy, request=req)
for node in node_iter:
self.assertIn('use_replication', node)
self.assertTrue(node['use_replication'])
def test_iter_does_not_mutate_supplied_nodes(self):
ring = FakeRing(replicas=8, max_more_nodes=20)
policy = StoragePolicy(0, 'ec', object_ring=ring)
other_iter = ring.get_part_nodes(0)
node_iter = NodeIter(self.app, policy.object_ring, 0, self.logger,
policy=policy, node_iter=iter(other_iter),
request=Request.blank(''))
nodes = list(node_iter)
self.assertEqual(len(other_iter), len(nodes))
for node in nodes:
self.assertIn('use_replication', node)
self.assertFalse(node['use_replication'])
self.assertEqual(other_iter, ring.get_part_nodes(0))
class TestGetterSource(unittest.TestCase):
def _make_source(self, headers, node):
resp = StubResponse(200, headers=headers)
return GetterSource(self.app, resp, node)
def setUp(self):
self.app = FakeApp()
self.node = {'ip': '1.2.3.4', 'port': '999'}
self.headers = {'X-Timestamp': '1234567.12345'}
self.resp = StubResponse(200, headers=self.headers)
def test_init(self):
src = GetterSource(self.app, self.resp, self.node)
self.assertIs(self.app, src.app)
self.assertIs(self.resp, src.resp)
self.assertEqual(self.node, src.node)
def test_timestamp(self):
# first test the no timestamp header case. Defaults to 0.
headers = {}
src = self._make_source(headers, self.node)
self.assertIsInstance(src.timestamp, Timestamp)
self.assertEqual(Timestamp(0), src.timestamp)
# now x-timestamp
headers = dict(self.headers)
src = self._make_source(headers, self.node)
self.assertIsInstance(src.timestamp, Timestamp)
self.assertEqual(Timestamp(1234567.12345), src.timestamp)
headers['x-put-timestamp'] = '1234567.11111'
src = self._make_source(headers, self.node)
self.assertIsInstance(src.timestamp, Timestamp)
self.assertEqual(Timestamp(1234567.11111), src.timestamp)
headers['x-backend-timestamp'] = '1234567.22222'
src = self._make_source(headers, self.node)
self.assertIsInstance(src.timestamp, Timestamp)
self.assertEqual(Timestamp(1234567.22222), src.timestamp)
headers['x-backend-data-timestamp'] = '1234567.33333'
src = self._make_source(headers, self.node)
self.assertIsInstance(src.timestamp, Timestamp)
self.assertEqual(Timestamp(1234567.33333), src.timestamp)
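    # The checks above pin down the precedence GetterSource.timestamp gives
    # to response headers: X-Backend-Data-Timestamp beats
    # X-Backend-Timestamp, which beats X-Put-Timestamp, which beats
    # X-Timestamp; with no timestamp header at all it falls back to
    # Timestamp(0).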
def test_sort(self):
# verify sorting by timestamp
srcs = [
self._make_source({'X-Timestamp': '12345.12345'},
{'ip': '1.2.3.7', 'port': '9'}),
self._make_source({'X-Timestamp': '12345.12346'},
{'ip': '1.2.3.8', 'port': '8'}),
self._make_source({'X-Timestamp': '12345.12343',
'X-Put-Timestamp': '12345.12344'},
{'ip': '1.2.3.9', 'port': '7'}),
]
actual = sorted(srcs, key=operator.attrgetter('timestamp'))
self.assertEqual([srcs[2], srcs[0], srcs[1]], actual)
def test_close(self):
# verify close is robust...
# source has no resp
src = GetterSource(self.app, None, self.node)
src.close()
# resp has no swift_conn
src = GetterSource(self.app, self.resp, self.node)
self.assertFalse(hasattr(src.resp, 'swift_conn'))
src.close()
# verify close is plumbed through...
src.resp.swift_conn = mock.MagicMock()
src.resp.nuke_from_orbit = mock.MagicMock()
src.close()
src.resp.nuke_from_orbit.assert_called_once_with()
| swift-master | test/unit/proxy/controllers/test_base.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import time
from mock import Mock
from swift.proxy.controllers import InfoController
from swift.proxy.server import Application as ProxyApp
from swift.common import registry, digest
from swift.common.swob import Request, HTTPException
from test.debug_logger import debug_logger
class TestInfoController(unittest.TestCase):
    def setUp(self):
        registry._swift_info = {}
        registry._swift_admin_info = {}
        # accumulators for the start_response() helper below
        self.got_statuses = []
        self.got_headers = []
def get_controller(self, expose_info=None, disallowed_sections=None,
admin_key=None):
disallowed_sections = disallowed_sections or []
app = Mock(spec=ProxyApp, logger=debug_logger())
return InfoController(app, None, expose_info,
disallowed_sections, admin_key)
def start_response(self, status, headers):
self.got_statuses.append(status)
for h in headers:
self.got_headers.append({h[0]: h[1]})
def test_disabled_info(self):
controller = self.get_controller(expose_info=False)
req = Request.blank(
'/info', environ={'REQUEST_METHOD': 'GET'})
resp = controller.GET(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('403 Forbidden', str(resp))
def test_get_info(self):
controller = self.get_controller(expose_info=True)
registry._swift_info = {'foo': {'bar': 'baz'}}
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
req = Request.blank(
'/info', environ={'REQUEST_METHOD': 'GET'})
resp = controller.GET(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('200 OK', str(resp))
info = json.loads(resp.body)
self.assertNotIn('admin', info)
self.assertIn('foo', info)
self.assertIn('bar', info['foo'])
self.assertEqual(info['foo']['bar'], 'baz')
def test_options_info(self):
controller = self.get_controller(expose_info=True)
req = Request.blank(
'/info', environ={'REQUEST_METHOD': 'GET'})
resp = controller.OPTIONS(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('200 OK', str(resp))
self.assertIn('Allow', resp.headers)
def test_get_info_cors(self):
controller = self.get_controller(expose_info=True)
registry._swift_info = {'foo': {'bar': 'baz'}}
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
req = Request.blank(
'/info', environ={'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://example.com'})
resp = controller.GET(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('200 OK', str(resp))
info = json.loads(resp.body)
self.assertNotIn('admin', info)
self.assertIn('foo', info)
self.assertIn('bar', info['foo'])
self.assertEqual(info['foo']['bar'], 'baz')
self.assertIn('Access-Control-Allow-Origin', resp.headers)
self.assertIn('Access-Control-Expose-Headers', resp.headers)
def test_head_info(self):
controller = self.get_controller(expose_info=True)
registry._swift_info = {'foo': {'bar': 'baz'}}
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
req = Request.blank(
'/info', environ={'REQUEST_METHOD': 'HEAD'})
resp = controller.HEAD(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('200 OK', str(resp))
def test_disallow_info(self):
controller = self.get_controller(expose_info=True,
disallowed_sections=['foo2'])
registry._swift_info = {'foo': {'bar': 'baz'},
'foo2': {'bar2': 'baz2'}}
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
req = Request.blank(
'/info', environ={'REQUEST_METHOD': 'GET'})
resp = controller.GET(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('200 OK', str(resp))
info = json.loads(resp.body)
self.assertIn('foo', info)
self.assertIn('bar', info['foo'])
self.assertEqual(info['foo']['bar'], 'baz')
self.assertNotIn('foo2', info)
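    # The admin variants of /info exercised below are authorised with a
    # query-string HMAC rather than normal auth.  A client-side sketch,
    # mirroring the calls used in these tests (the helper lives in
    # swift.common.digest):
    #
    #     expires = int(time.time() + 86400)
    #     sig = digest.get_hmac('GET', '/info', expires, admin_key)
    #     path = '/info?swiftinfo_sig=%s&swiftinfo_expires=%s' % (sig, expires)
    #
    # A disabled (empty) admin key yields 403, while a stale expiry,
    # mismatched method/path or wrong key yields 401, as verified below.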
def test_disabled_admin_info(self):
controller = self.get_controller(expose_info=True, admin_key='')
registry._swift_info = {'foo': {'bar': 'baz'}}
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = int(time.time() + 86400)
sig = digest.get_hmac('GET', '/info', expires, '')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
path, environ={'REQUEST_METHOD': 'GET'})
resp = controller.GET(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('403 Forbidden', str(resp))
def test_get_admin_info(self):
controller = self.get_controller(expose_info=True,
admin_key='secret-admin-key')
registry._swift_info = {'foo': {'bar': 'baz'}}
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = int(time.time() + 86400)
sig = digest.get_hmac('GET', '/info', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
path, environ={'REQUEST_METHOD': 'GET'})
resp = controller.GET(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('200 OK', str(resp))
info = json.loads(resp.body)
self.assertIn('admin', info)
self.assertIn('qux', info['admin'])
self.assertIn('quux', info['admin']['qux'])
self.assertEqual(info['admin']['qux']['quux'], 'corge')
def test_head_admin_info(self):
controller = self.get_controller(expose_info=True,
admin_key='secret-admin-key')
registry._swift_info = {'foo': {'bar': 'baz'}}
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = int(time.time() + 86400)
sig = digest.get_hmac('GET', '/info', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
path, environ={'REQUEST_METHOD': 'HEAD'})
resp = controller.GET(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('200 OK', str(resp))
expires = int(time.time() + 86400)
sig = digest.get_hmac('HEAD', '/info', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
path, environ={'REQUEST_METHOD': 'HEAD'})
resp = controller.GET(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('200 OK', str(resp))
def test_get_admin_info_invalid_method(self):
controller = self.get_controller(expose_info=True,
admin_key='secret-admin-key')
registry._swift_info = {'foo': {'bar': 'baz'}}
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = int(time.time() + 86400)
sig = digest.get_hmac('HEAD', '/info', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
path, environ={'REQUEST_METHOD': 'GET'})
resp = controller.GET(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('401 Unauthorized', str(resp))
def test_get_admin_info_invalid_expires(self):
controller = self.get_controller(expose_info=True,
admin_key='secret-admin-key')
registry._swift_info = {'foo': {'bar': 'baz'}}
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = 1
sig = digest.get_hmac('GET', '/info', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
path, environ={'REQUEST_METHOD': 'GET'})
resp = controller.GET(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('401 Unauthorized', str(resp))
expires = 'abc'
sig = digest.get_hmac('GET', '/info', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
path, environ={'REQUEST_METHOD': 'GET'})
resp = controller.GET(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('401 Unauthorized', str(resp))
def test_get_admin_info_invalid_path(self):
controller = self.get_controller(expose_info=True,
admin_key='secret-admin-key')
registry._swift_info = {'foo': {'bar': 'baz'}}
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = int(time.time() + 86400)
sig = digest.get_hmac('GET', '/foo', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
path, environ={'REQUEST_METHOD': 'GET'})
resp = controller.GET(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('401 Unauthorized', str(resp))
def test_get_admin_info_invalid_key(self):
controller = self.get_controller(expose_info=True,
admin_key='secret-admin-key')
registry._swift_info = {'foo': {'bar': 'baz'}}
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = int(time.time() + 86400)
sig = digest.get_hmac('GET', '/foo', expires, 'invalid-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
path, environ={'REQUEST_METHOD': 'GET'})
resp = controller.GET(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('401 Unauthorized', str(resp))
def test_admin_disallow_info(self):
controller = self.get_controller(expose_info=True,
disallowed_sections=['foo2'],
admin_key='secret-admin-key')
registry._swift_info = {'foo': {'bar': 'baz'},
'foo2': {'bar2': 'baz2'}}
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = int(time.time() + 86400)
sig = digest.get_hmac('GET', '/info', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
path, environ={'REQUEST_METHOD': 'GET'})
resp = controller.GET(req)
self.assertIsInstance(resp, HTTPException)
self.assertEqual('200 OK', str(resp))
info = json.loads(resp.body)
self.assertNotIn('foo2', info)
self.assertIn('admin', info)
self.assertIn('disallowed_sections', info['admin'])
self.assertIn('foo2', info['admin']['disallowed_sections'])
self.assertIn('qux', info['admin'])
self.assertIn('quux', info['admin']['qux'])
self.assertEqual(info['admin']['qux']['quux'], 'corge')
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/proxy/controllers/test_info.py |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import unittest
import sys
from subprocess import check_output
class TestTranslations(unittest.TestCase):
def test_translations(self):
translated_message = check_output([sys.executable, __file__], env={
# Need to set this so py36 can do UTF-8, but we override later
'LC_ALL': 'en_US.UTF-8',
# Nothing else should be in the env, so we won't taint our test
})
self.assertEqual(translated_message,
u'prova mesa\u011do\n'.encode('utf-8'))
if __name__ == "__main__":
path = os.path.realpath(__file__)
# Override the language and localedir *before* importing swift
# so we get translations
os.environ['LC_ALL'] = 'eo'
os.environ['SWIFT_LOCALEDIR'] = os.path.dirname(path)
# Make sure we can find swift
sys.path.insert(0, os.path.sep.join(path.split(os.path.sep)[:-4]))
from swift import gettext_ as _
print(_('test message'))
| swift-master | test/unit/test_locale/test_locale.py |
swift-master | test/unit/test_locale/__init__.py |
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from swift.cli import dispersion_report
class TestDispersionReport(unittest.TestCase):
def test_placeholder(self):
self.assertTrue(callable(dispersion_report.main))
| swift-master | test/unit/cli/test_dispersion_report.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Samuel Merritt <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import hmac
import mock
import six
import unittest
from swift.cli import form_signature
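# swift-form-signature computes the HMAC-SHA1 expected by the formpost
# middleware: the key is applied to "\n".join((path, redirect,
# max_file_size, max_file_count, expires)), which is exactly what
# test_prints_signature reconstructs below.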
class TestFormSignature(unittest.TestCase):
def test_prints_signature(self):
the_time = 1406143563.020043
key = 'secret squirrel'
expires = 3600
path = '/v1/a/c/o'
redirect = 'https://example.com/done.html'
max_file_size = str(int(1024 * 1024 * 1024 * 3.14159)) # π GiB
max_file_count = '3'
data = "\n".join((
path, redirect, max_file_size, max_file_count,
str(int(the_time + expires))))
if six.PY3:
data = data if isinstance(data, six.binary_type) else \
data.encode('utf8')
key = key if isinstance(key, six.binary_type) else \
key.encode('utf8')
expected_signature = hmac.new(key, data, hashlib.sha1).hexdigest()
out = six.StringIO()
with mock.patch('swift.cli.form_signature.time', lambda: the_time):
with mock.patch('sys.stdout', out):
exitcode = form_signature.main([
'/path/to/swift-form-signature',
path, redirect, max_file_size,
max_file_count, str(expires), key])
self.assertEqual(exitcode, 0)
self.assertIn("Signature: %s" % expected_signature,
out.getvalue())
self.assertIn("Expires: %d" % (the_time + expires,),
out.getvalue())
sig_input = ('<input type="hidden" name="signature" value="%s" />'
% expected_signature)
self.assertIn(sig_input, out.getvalue())
def test_too_few_args(self):
out = six.StringIO()
with mock.patch('sys.stdout', out):
exitcode = form_signature.main([
'/path/to/swift-form-signature',
'/v1/a/c/o', '', '12', '34', '3600'])
self.assertNotEqual(exitcode, 0)
usage = 'Syntax: swift-form-signature <path>'
self.assertIn(usage, out.getvalue())
def test_invalid_filesize_arg(self):
out = six.StringIO()
key = 'secret squirrel'
with mock.patch('sys.stdout', out):
exitcode = form_signature.main([
'/path/to/swift-form-signature',
'/v1/a/c/o', '', '-1', '34', '3600', key])
self.assertNotEqual(exitcode, 0)
def test_invalid_filecount_arg(self):
out = six.StringIO()
key = 'secret squirrel'
with mock.patch('sys.stdout', out):
exitcode = form_signature.main([
'/path/to/swift-form-signature',
'/v1/a/c/o', '', '12', '-34', '3600', key])
self.assertNotEqual(exitcode, 0)
def test_invalid_path_arg(self):
out = six.StringIO()
key = 'secret squirrel'
with mock.patch('sys.stdout', out):
exitcode = form_signature.main([
'/path/to/swift-form-signature',
'/v1/a/', '', '12', '34', '3600', key])
self.assertNotEqual(exitcode, 0)
def test_invalid_seconds_arg(self):
out = six.StringIO()
key = 'secret squirrel'
with mock.patch('sys.stdout', out):
exitcode = form_signature.main([
'/path/to/swift-form-signature',
'/v1/a/c/o', '', '12', '34',
'-922337203685477580799999999999999', key])
self.assertNotEqual(exitcode, 0)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/cli/test_form_signature.py |
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import sys
import unittest
from argparse import Namespace
from textwrap import dedent
import mock
from shutil import rmtree
from tempfile import mkdtemp
import six
from six.moves import cStringIO as StringIO
from swift.cli.manage_shard_ranges import main
from swift.common import utils
from swift.common.utils import Timestamp, ShardRange
from swift.container.backend import ContainerBroker
from swift.container.sharder import make_shard_ranges
from test.unit import mock_timestamp_now, make_timestamp_iter, with_tempdir
class TestManageShardRanges(unittest.TestCase):
def setUp(self):
self.ts_iter = make_timestamp_iter()
self.testdir = os.path.join(mkdtemp(), 'tmp_test_cli_find_shards')
utils.mkdirs(self.testdir)
rmtree(self.testdir)
self.shard_data = [
{'index': 0, 'lower': '', 'upper': 'obj09',
'object_count': 10},
{'index': 1, 'lower': 'obj09', 'upper': 'obj19',
'object_count': 10},
{'index': 2, 'lower': 'obj19', 'upper': 'obj29',
'object_count': 10},
{'index': 3, 'lower': 'obj29', 'upper': 'obj39',
'object_count': 10},
{'index': 4, 'lower': 'obj39', 'upper': 'obj49',
'object_count': 10},
{'index': 5, 'lower': 'obj49', 'upper': 'obj59',
'object_count': 10},
{'index': 6, 'lower': 'obj59', 'upper': 'obj69',
'object_count': 10},
{'index': 7, 'lower': 'obj69', 'upper': 'obj79',
'object_count': 10},
{'index': 8, 'lower': 'obj79', 'upper': 'obj89',
'object_count': 10},
{'index': 9, 'lower': 'obj89', 'upper': '',
'object_count': 10},
]
self.overlap_shard_data_1 = [
{'index': 0, 'lower': '', 'upper': 'obj10',
'object_count': 1},
{'index': 1, 'lower': 'obj10', 'upper': 'obj20',
'object_count': 1},
{'index': 2, 'lower': 'obj20', 'upper': 'obj30',
'object_count': 1},
{'index': 3, 'lower': 'obj30', 'upper': 'obj39',
'object_count': 1},
{'index': 4, 'lower': 'obj39', 'upper': 'obj49',
'object_count': 1},
{'index': 5, 'lower': 'obj49', 'upper': 'obj58',
'object_count': 1},
{'index': 6, 'lower': 'obj58', 'upper': 'obj68',
'object_count': 1},
{'index': 7, 'lower': 'obj68', 'upper': 'obj78',
'object_count': 1},
{'index': 8, 'lower': 'obj78', 'upper': 'obj88',
'object_count': 1},
{'index': 9, 'lower': 'obj88', 'upper': '',
'object_count': 1},
]
self.overlap_shard_data_2 = [
{'index': 0, 'lower': '', 'upper': 'obj11', 'object_count': 1},
{'index': 1, 'lower': 'obj11', 'upper': 'obj21',
'object_count': 1},
]
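    # Fixture notes: shard_data describes ten contiguous ranges covering
    # the whole namespace ('' .. '') with ten objects each, while the two
    # overlap_shard_data_* sets deliberately cut across those boundaries so
    # that the replace/merge/analyze tests below have overlaps to work with.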
def tearDown(self):
rmtree(os.path.dirname(self.testdir))
def assert_shard_ranges_equal(self, expected, actual):
self.assertEqual([dict(sr) for sr in expected],
[dict(sr) for sr in actual])
def assert_starts_with(self, value, prefix):
self.assertTrue(value.startswith(prefix),
"%r does not start with %r" % (value, prefix))
def assert_formatted_json(self, output, expected):
try:
loaded = json.loads(output)
except ValueError as err:
self.fail('Invalid JSON: %s\n%r' % (err, output))
# Check this one first, for a prettier diff
self.assertEqual(loaded, expected)
formatted = json.dumps(expected, sort_keys=True, indent=2) + '\n'
self.assertEqual(output, formatted)
def _make_broker(self, account='a', container='c',
device='sda', part=0):
datadir = os.path.join(
self.testdir, device, 'containers', str(part), 'ash', 'hash')
db_file = os.path.join(datadir, 'hash.db')
broker = ContainerBroker(
db_file, account=account, container=container)
broker.initialize()
return broker
def _move_broker_to_sharded_state(self, broker):
epoch = Timestamp.now()
broker.enable_sharding(epoch)
self.assertTrue(broker.set_sharding_state())
self.assertTrue(broker.set_sharded_state())
own_sr = broker.get_own_shard_range()
own_sr.update_state(ShardRange.SHARDED, epoch)
broker.merge_shard_ranges([own_sr])
return epoch
def test_conf_file_options(self):
db_file = os.path.join(self.testdir, 'hash.db')
broker = ContainerBroker(db_file, account='a', container='c')
broker.initialize()
conf = """
[container-sharder]
shrink_threshold = 150
expansion_limit = 650
shard_container_threshold = 1000
rows_per_shard = 600
max_shrinking = 33
max_expanding = 31
minimum_shard_size = 88
"""
conf_file = os.path.join(self.testdir, 'sharder.conf')
with open(conf_file, 'w') as fd:
fd.write(dedent(conf))
# default values
with mock.patch('swift.cli.manage_shard_ranges.find_ranges',
return_value=0) as mocked:
ret = main([db_file, 'find'])
self.assertEqual(0, ret)
expected = Namespace(conf_file=None,
path_to_file=mock.ANY,
func=mock.ANY,
rows_per_shard=500000,
subcommand='find',
force_commits=False,
verbose=0,
minimum_shard_size=100000)
mocked.assert_called_once_with(mock.ANY, expected)
# conf file
with mock.patch('swift.cli.manage_shard_ranges.find_ranges',
return_value=0) as mocked:
ret = main([db_file, '--config', conf_file, 'find'])
self.assertEqual(0, ret)
expected = Namespace(conf_file=conf_file,
path_to_file=mock.ANY,
func=mock.ANY,
rows_per_shard=600,
subcommand='find',
force_commits=False,
verbose=0,
minimum_shard_size=88)
mocked.assert_called_once_with(mock.ANY, expected)
# cli options override conf file
with mock.patch('swift.cli.manage_shard_ranges.find_ranges',
return_value=0) as mocked:
ret = main([db_file, '--config', conf_file, 'find', '12345',
'--minimum-shard-size', '99'])
self.assertEqual(0, ret)
expected = Namespace(conf_file=conf_file,
path_to_file=mock.ANY,
func=mock.ANY,
rows_per_shard=12345,
subcommand='find',
force_commits=False,
verbose=0,
minimum_shard_size=99)
mocked.assert_called_once_with(mock.ANY, expected)
# default values
with mock.patch('swift.cli.manage_shard_ranges.compact_shard_ranges',
return_value=0) as mocked:
ret = main([db_file, 'compact'])
self.assertEqual(0, ret)
expected = Namespace(conf_file=None,
path_to_file=mock.ANY,
func=mock.ANY,
subcommand='compact',
force_commits=False,
verbose=0,
max_expanding=-1,
max_shrinking=1,
shrink_threshold=100000,
expansion_limit=750000,
yes=False,
dry_run=False)
mocked.assert_called_once_with(mock.ANY, expected)
# conf file
with mock.patch('swift.cli.manage_shard_ranges.compact_shard_ranges',
return_value=0) as mocked:
ret = main([db_file, '--config', conf_file, 'compact'])
self.assertEqual(0, ret)
expected = Namespace(conf_file=conf_file,
path_to_file=mock.ANY,
func=mock.ANY,
subcommand='compact',
force_commits=False,
verbose=0,
max_expanding=31,
max_shrinking=33,
shrink_threshold=150,
expansion_limit=650,
yes=False,
dry_run=False)
mocked.assert_called_once_with(mock.ANY, expected)
# cli options
with mock.patch('swift.cli.manage_shard_ranges.compact_shard_ranges',
return_value=0) as mocked:
ret = main([db_file, '--config', conf_file, 'compact',
'--max-shrinking', '22',
'--max-expanding', '11',
'--expansion-limit', '3456',
'--shrink-threshold', '1234'])
self.assertEqual(0, ret)
expected = Namespace(conf_file=conf_file,
path_to_file=mock.ANY,
func=mock.ANY,
subcommand='compact',
force_commits=False,
verbose=0,
max_expanding=11,
max_shrinking=22,
shrink_threshold=1234,
expansion_limit=3456,
yes=False,
dry_run=False)
mocked.assert_called_once_with(mock.ANY, expected)
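    # The three blocks above establish the precedence used throughout this
    # class: built-in defaults are overridden by the [container-sharder]
    # section of the --config file, which in turn is overridden by values
    # given on the command line.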
def test_conf_file_deprecated_options(self):
        # verify that deprecated percent-based options do get applied
db_file = os.path.join(self.testdir, 'hash.db')
broker = ContainerBroker(db_file, account='a', container='c')
broker.initialize()
conf = """
[container-sharder]
shard_shrink_point = 15
shard_shrink_merge_point = 65
shard_container_threshold = 1000
max_shrinking = 33
max_expanding = 31
"""
conf_file = os.path.join(self.testdir, 'sharder.conf')
with open(conf_file, 'w') as fd:
fd.write(dedent(conf))
with mock.patch('swift.cli.manage_shard_ranges.compact_shard_ranges',
return_value=0) as mocked:
ret = main([db_file, '--config', conf_file, 'compact'])
self.assertEqual(0, ret)
expected = Namespace(conf_file=conf_file,
path_to_file=mock.ANY,
func=mock.ANY,
subcommand='compact',
force_commits=False,
verbose=0,
max_expanding=31,
max_shrinking=33,
shrink_threshold=150,
expansion_limit=650,
yes=False,
dry_run=False)
mocked.assert_called_once_with(mock.ANY, expected)
# absolute value options take precedence if specified in the conf file
conf = """
[container-sharder]
shard_shrink_point = 15
shrink_threshold = 123
shard_shrink_merge_point = 65
expansion_limit = 456
shard_container_threshold = 1000
max_shrinking = 33
max_expanding = 31
"""
conf_file = os.path.join(self.testdir, 'sharder.conf')
with open(conf_file, 'w') as fd:
fd.write(dedent(conf))
with mock.patch('swift.cli.manage_shard_ranges.compact_shard_ranges') \
as mocked:
main([db_file, '--config', conf_file, 'compact'])
expected = Namespace(conf_file=conf_file,
path_to_file=mock.ANY,
func=mock.ANY,
subcommand='compact',
force_commits=False,
verbose=0,
max_expanding=31,
max_shrinking=33,
shrink_threshold=123,
expansion_limit=456,
yes=False,
dry_run=False)
mocked.assert_called_once_with(mock.ANY, expected)
# conf file - small percentages resulting in zero absolute values
# should be respected rather than falling back to defaults, to avoid
# nasty surprises
conf = """
[container-sharder]
shard_shrink_point = 1
shard_shrink_merge_point = 2
shard_container_threshold = 10
max_shrinking = 33
max_expanding = 31
"""
conf_file = os.path.join(self.testdir, 'sharder.conf')
with open(conf_file, 'w') as fd:
fd.write(dedent(conf))
with mock.patch('swift.cli.manage_shard_ranges.compact_shard_ranges') \
as mocked:
main([db_file, '--config', conf_file, 'compact'])
expected = Namespace(conf_file=conf_file,
path_to_file=mock.ANY,
func=mock.ANY,
subcommand='compact',
force_commits=False,
verbose=0,
max_expanding=31,
max_shrinking=33,
shrink_threshold=0,
expansion_limit=0,
yes=False,
dry_run=False)
mocked.assert_called_once_with(mock.ANY, expected)
def test_conf_file_invalid(self):
db_file = os.path.join(self.testdir, 'hash.db')
broker = ContainerBroker(db_file, account='a', container='c')
broker.initialize()
# conf file - invalid value for shard_container_threshold
conf = """
[container-sharder]
shrink_threshold = 1
expansion_limit = 2
shard_container_threshold = 0
max_shrinking = 33
max_expanding = 31
"""
conf_file = os.path.join(self.testdir, 'sharder.conf')
with open(conf_file, 'w') as fd:
fd.write(dedent(conf))
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([db_file, '--config', conf_file, 'compact'])
self.assertEqual(2, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Error loading config')
self.assertIn('shard_container_threshold', err_lines[0])
def test_conf_file_invalid_deprecated_options(self):
db_file = os.path.join(self.testdir, 'hash.db')
broker = ContainerBroker(db_file, account='a', container='c')
broker.initialize()
        # conf file - invalid value for shard_shrink_point
conf = """
[container-sharder]
shard_shrink_point = -1
shard_shrink_merge_point = 2
shard_container_threshold = 1000
max_shrinking = 33
max_expanding = 31
"""
conf_file = os.path.join(self.testdir, 'sharder.conf')
with open(conf_file, 'w') as fd:
fd.write(dedent(conf))
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
main([db_file, '--config', conf_file, 'compact'])
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Error loading config')
self.assertIn('shard_shrink_point', err_lines[0])
def test_conf_file_does_not_exist(self):
db_file = os.path.join(self.testdir, 'hash.db')
broker = ContainerBroker(db_file, account='a', container='c')
broker.initialize()
conf_file = os.path.join(self.testdir, 'missing_sharder.conf')
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([db_file, '--config', conf_file, 'compact'])
self.assertEqual(1, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Error opening config file')
def test_find_shard_ranges(self):
db_file = os.path.join(self.testdir, 'hash.db')
broker = ContainerBroker(db_file)
broker.account = 'a'
broker.container = 'c'
broker.initialize()
ts = utils.Timestamp.now()
broker.merge_items([
{'name': 'obj%02d' % i, 'created_at': ts.internal, 'size': 0,
'content_type': 'application/octet-stream', 'etag': 'not-really',
'deleted': 0, 'storage_policy_index': 0,
'ctype_timestamp': ts.internal, 'meta_timestamp': ts.internal}
for i in range(100)])
# Default uses a large enough value that sharding isn't required
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([db_file, 'find'])
self.assertEqual(0, ret)
self.assert_formatted_json(out.getvalue(), [])
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
self.assert_starts_with(err_lines[1], 'Found 0 ranges in ')
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([db_file, 'find', '100'])
self.assertEqual(0, ret)
self.assert_formatted_json(out.getvalue(), [])
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
self.assert_starts_with(err_lines[1], 'Found 0 ranges in ')
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([db_file, 'find', '99', '--minimum-shard-size', '1'])
self.assertEqual(0, ret)
self.assert_formatted_json(out.getvalue(), [
{'index': 0, 'lower': '', 'upper': 'obj98', 'object_count': 99},
{'index': 1, 'lower': 'obj98', 'upper': '', 'object_count': 1},
])
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
self.assert_starts_with(err_lines[1], 'Found 2 ranges in ')
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([db_file, 'find', '10'])
self.assertEqual(0, ret)
self.assert_formatted_json(out.getvalue(), [
{'index': 0, 'lower': '', 'upper': 'obj09', 'object_count': 10},
{'index': 1, 'lower': 'obj09', 'upper': 'obj19',
'object_count': 10},
{'index': 2, 'lower': 'obj19', 'upper': 'obj29',
'object_count': 10},
{'index': 3, 'lower': 'obj29', 'upper': 'obj39',
'object_count': 10},
{'index': 4, 'lower': 'obj39', 'upper': 'obj49',
'object_count': 10},
{'index': 5, 'lower': 'obj49', 'upper': 'obj59',
'object_count': 10},
{'index': 6, 'lower': 'obj59', 'upper': 'obj69',
'object_count': 10},
{'index': 7, 'lower': 'obj69', 'upper': 'obj79',
'object_count': 10},
{'index': 8, 'lower': 'obj79', 'upper': 'obj89',
'object_count': 10},
{'index': 9, 'lower': 'obj89', 'upper': '', 'object_count': 10},
])
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
self.assert_starts_with(err_lines[1], 'Found 10 ranges in ')
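    # 'find' is read-only: it prints candidate ranges as JSON
    # (index/lower/upper/object_count) without writing to the broker.
    # Operators would typically run it via the swift-manage-shard-ranges
    # entry point, e.g.:
    #     swift-manage-shard-ranges <container.db> find 500000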
def test_find_shard_ranges_with_minimum_size(self):
db_file = os.path.join(self.testdir, 'hash.db')
broker = ContainerBroker(db_file)
broker.account = 'a'
broker.container = 'c'
broker.initialize()
ts = utils.Timestamp.now()
# with 105 objects and rows_per_shard = 50 there is the potential for a
# tail shard of size 5
broker.merge_items([
{'name': 'obj%03d' % i, 'created_at': ts.internal, 'size': 0,
'content_type': 'application/octet-stream', 'etag': 'not-really',
'deleted': 0, 'storage_policy_index': 0,
'ctype_timestamp': ts.internal, 'meta_timestamp': ts.internal}
for i in range(105)])
def assert_tail_shard_not_extended(minimum):
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([db_file, 'find', '50',
'--minimum-shard-size', str(minimum)])
self.assertEqual(0, ret)
self.assert_formatted_json(out.getvalue(), [
{'index': 0, 'lower': '', 'upper': 'obj049',
'object_count': 50},
{'index': 1, 'lower': 'obj049', 'upper': 'obj099',
'object_count': 50},
{'index': 2, 'lower': 'obj099', 'upper': '',
'object_count': 5},
])
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
self.assert_starts_with(err_lines[1], 'Found 3 ranges in ')
# tail shard size > minimum
assert_tail_shard_not_extended(1)
assert_tail_shard_not_extended(4)
assert_tail_shard_not_extended(5)
def assert_tail_shard_extended(minimum):
out = StringIO()
err = StringIO()
if minimum is not None:
extra_args = ['--minimum-shard-size', str(minimum)]
else:
extra_args = []
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([db_file, 'find', '50'] + extra_args)
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_formatted_json(out.getvalue(), [
{'index': 0, 'lower': '', 'upper': 'obj049',
'object_count': 50},
{'index': 1, 'lower': 'obj049', 'upper': '',
'object_count': 55},
])
            self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
            self.assert_starts_with(err_lines[1], 'Found 2 ranges in ')
# sanity check - no minimum specified, defaults to rows_per_shard/5
assert_tail_shard_extended(None)
assert_tail_shard_extended(6)
assert_tail_shard_extended(50)
def assert_too_large_value_handled(minimum):
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([db_file, 'find', '50',
'--minimum-shard-size', str(minimum)])
self.assertEqual(2, ret)
self.assertEqual(
'Invalid config: minimum_shard_size (%s) must be <= '
'rows_per_shard (50)' % minimum, err.getvalue().strip())
assert_too_large_value_handled(51)
assert_too_large_value_handled(52)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
with self.assertRaises(SystemExit):
main([db_file, 'find', '50', '--minimum-shard-size', '-1'])
def test_info(self):
broker = self._make_broker()
ts = next(self.ts_iter)
broker.merge_items([
{'name': 'obj%02d' % i, 'created_at': ts.internal, 'size': 9,
'content_type': 'application/octet-stream', 'etag': 'not-really',
'deleted': 0, 'storage_policy_index': 0,
'ctype_timestamp': ts.internal, 'meta_timestamp': ts.internal}
for i in range(100)])
broker.update_metadata({'X-Container-Sysmeta-Sharding':
(True, Timestamp.now().internal)})
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'info'])
self.assertEqual(0, ret)
expected = ['Sharding enabled = True',
'Own shard range: None',
'db_state = unsharded',
'object_count = 100',
'bytes_used = 900',
'Metadata:',
' X-Container-Sysmeta-Sharding = True']
self.assertEqual(expected, out.getvalue().splitlines())
self.assertEqual(['Loaded db broker for a/c'],
err.getvalue().splitlines())
retiring_db_id = broker.get_info()['id']
broker.merge_shard_ranges(ShardRange('.shards/cc', Timestamp.now()))
epoch = Timestamp.now()
with mock_timestamp_now(epoch) as now:
broker.enable_sharding(epoch)
self.assertTrue(broker.set_sharding_state())
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
with mock_timestamp_now(now):
ret = main([broker.db_file, 'info'])
self.assertEqual(0, ret)
expected = ['Sharding enabled = True',
'Own shard range: {',
' "bytes_used": 0,',
' "deleted": 0,',
' "epoch": "%s",' % epoch.internal,
' "lower": "",',
' "meta_timestamp": "%s",' % now.internal,
' "name": "a/c",',
' "object_count": 0,',
' "reported": 0,',
' "state": "sharding",',
' "state_timestamp": "%s",' % now.internal,
' "timestamp": "%s",' % now.internal,
' "tombstones": -1,',
' "upper": ""',
'}',
'db_state = sharding',
'object_count = 100',
'bytes_used = 900',
'Retiring db id: %s' % retiring_db_id,
'Cleaving context: {',
' "cleave_to_row": null,',
' "cleaving_done": false,',
' "cursor": "",',
' "last_cleave_to_row": null,',
' "max_row": 100,',
' "misplaced_done": false,',
' "ranges_done": 0,',
' "ranges_todo": 0,',
' "ref": "%s"' % retiring_db_id,
'}',
'Metadata:',
' X-Container-Sysmeta-Sharding = True']
# The json.dumps() in py2 produces trailing space, not in py3.
result = [x.rstrip() for x in out.getvalue().splitlines()]
self.assertEqual(expected, result)
self.assertEqual(['Loaded db broker for a/c'],
err.getvalue().splitlines())
self.assertTrue(broker.set_sharded_state())
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
with mock_timestamp_now(now):
ret = main([broker.db_file, 'info'])
self.assertEqual(0, ret)
expected = ['Sharding enabled = True',
'Own shard range: {',
' "bytes_used": 0,',
' "deleted": 0,',
' "epoch": "%s",' % epoch.internal,
' "lower": "",',
' "meta_timestamp": "%s",' % now.internal,
' "name": "a/c",',
' "object_count": 0,',
' "reported": 0,',
' "state": "sharding",',
' "state_timestamp": "%s",' % now.internal,
' "timestamp": "%s",' % now.internal,
' "tombstones": -1,',
' "upper": ""',
'}',
'db_state = sharded',
# in sharded state the object stats are determined by the
# shard ranges, and we haven't created any in the test...
'object_count = 0',
'bytes_used = 0',
'Metadata:',
' X-Container-Sysmeta-Sharding = True']
self.assertEqual(expected,
[x.rstrip() for x in out.getvalue().splitlines()])
self.assertEqual(['Loaded db broker for a/c'],
err.getvalue().splitlines())
def test_show(self):
broker = self._make_broker()
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'show'])
self.assertEqual(0, ret)
expected = [
'Loaded db broker for a/c',
'No shard data found.',
]
self.assertEqual(expected, err.getvalue().splitlines())
self.assertEqual('', out.getvalue())
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
expected_shard_ranges = [
dict(sr, state=ShardRange.STATES[sr.state])
for sr in shard_ranges
]
broker.merge_shard_ranges(shard_ranges)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'show'])
self.assertEqual(0, ret)
expected = [
'Loaded db broker for a/c',
'Existing shard ranges:',
]
self.assertEqual(expected, err.getvalue().splitlines())
self.assertEqual(expected_shard_ranges, json.loads(out.getvalue()))
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'show', '--includes', 'foo'])
self.assertEqual(0, ret)
expected = [
'Loaded db broker for a/c',
'Existing shard ranges:',
]
self.assertEqual(expected, err.getvalue().splitlines())
self.assertEqual(expected_shard_ranges[:1], json.loads(out.getvalue()))
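    # 'merge' injects shard ranges read from a JSON file into the broker.
    # The tests below cover resolving an overlap by merging a deleted copy
    # of the bad range, filling a gap with replacement ranges, and the
    # warnings emitted when a merge would itself create overlaps or gaps.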
def test_merge(self):
broker = self._make_broker()
broker.update_metadata({'X-Container-Sysmeta-Sharding':
(True, Timestamp.now().internal)})
good_shard_ranges = []
for shard in self.shard_data[:3]:
good_shard_ranges.append(ShardRange(name='a/c_' + shard['lower'],
timestamp=next(self.ts_iter),
state=ShardRange.ACTIVE,
lower=shard['lower'],
upper=shard['upper']))
# insert an overlap..
bad_shard_range = ShardRange(
name='a/c_bad_' + self.shard_data[1]['lower'],
timestamp=next(self.ts_iter),
state=ShardRange.ACTIVE,
lower=self.shard_data[1]['lower'],
upper=self.shard_data[2]['upper'])
broker.merge_shard_ranges(good_shard_ranges + [bad_shard_range])
self.assertEqual(
[('', 'obj09'),
('obj09', 'obj19'),
('obj09', 'obj29'),
('obj19', 'obj29')],
[(sr.lower_str, sr.upper_str) for sr in broker.get_shard_ranges()])
# use command to merge in a deleted version of the bad shard range
bad_shard_range.update_state(ShardRange.SHRUNK,
state_timestamp=next(self.ts_iter))
bad_shard_range.set_deleted(next(self.ts_iter))
bad_shard_range.update_meta(0, 0, next(self.ts_iter))
input_file = os.path.join(self.testdir, 'shards')
with open(input_file, 'w') as fd:
json.dump([dict(bad_shard_range)], fd)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, '-v', 'merge', input_file,
'--replace-timeout', '1', '--yes'])
self.assertEqual(0, ret)
affected_shard_ranges = [dict(sr) for sr in good_shard_ranges]
expected_msg = [
'This change will result in the following shard ranges in the '
'affected namespace:']
expected_msg.extend(
json.dumps(affected_shard_ranges, indent=2).splitlines())
expected_msg.extend(
['Injected 1 shard ranges.',
'Run container-replicator to replicate them to other nodes.'])
self.assertEqual(expected_msg, out.getvalue().splitlines())
self.assertEqual(['Loaded db broker for a/c'],
err.getvalue().splitlines())
self.assertEqual(
[dict(sr) for sr in good_shard_ranges],
[dict(sr) for sr in broker.get_shard_ranges()])
self.assertEqual(
dict(bad_shard_range),
dict(broker.get_shard_ranges(include_deleted=True)[3]))
def test_merge_fills_gap(self):
broker = self._make_broker()
broker.update_metadata({'X-Container-Sysmeta-Sharding':
(True, Timestamp.now().internal)})
old_shard_ranges = []
for shard in self.shard_data[:1]:
old_shard_ranges.append(ShardRange(name='a/c_' + shard['lower'],
timestamp=next(self.ts_iter),
state=ShardRange.ACTIVE,
lower=shard['lower'],
upper=shard['upper']))
# use command to merge in a deleted version of the existing and two
# new ranges
new_shard_ranges = [
old_shard_ranges[0].copy(deleted=True,
timestamp=next(self.ts_iter)),
ShardRange(
name='a/c_1_' + self.shard_data[0]['lower'],
timestamp=next(self.ts_iter),
state=ShardRange.ACTIVE,
lower=self.shard_data[0]['lower'],
upper=self.shard_data[0]['upper'] + 'a'),
ShardRange(
name='a/c_1_' + self.shard_data[0]['upper'] + 'a',
timestamp=next(self.ts_iter),
state=ShardRange.ACTIVE,
lower=self.shard_data[0]['upper'] + 'a',
upper=self.shard_data[1]['upper'] + 'a'),
]
input_file = os.path.join(self.testdir, 'shards')
with open(input_file, 'w') as fd:
json.dump([dict(sr) for sr in new_shard_ranges], fd)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, '-v', 'merge', input_file,
'--replace-timeout', '1', '--yes'])
self.assertEqual(0, ret)
affected_shard_ranges = [dict(sr) for sr in new_shard_ranges[1:]]
expected_msg = [
'This change will result in the following shard ranges in the '
'affected namespace:']
expected_msg.extend(
json.dumps(affected_shard_ranges, indent=2).splitlines())
expected_msg.extend(
['Injected 3 shard ranges.',
'Run container-replicator to replicate them to other nodes.'])
self.assertEqual(expected_msg, out.getvalue().splitlines())
self.assertEqual(['Loaded db broker for a/c'],
err.getvalue().splitlines())
self.assertEqual(
[dict(sr) for sr in new_shard_ranges[1:]],
[dict(sr) for sr in broker.get_shard_ranges()])
self.assertEqual(
[dict(sr) for sr in new_shard_ranges],
[dict(sr) for sr in broker.get_shard_ranges(include_deleted=True)])
def test_merge_warns_of_overlap(self):
broker = self._make_broker()
broker.update_metadata({'X-Container-Sysmeta-Sharding':
(True, Timestamp.now().internal)})
old_shard_ranges = []
for shard in self.shard_data[:3]:
old_shard_ranges.append(ShardRange(name='a/c_' + shard['lower'],
timestamp=next(self.ts_iter),
state=ShardRange.ACTIVE,
lower=shard['lower'],
upper=shard['upper']))
broker.merge_shard_ranges(old_shard_ranges)
# use command to merge in a new range that overlaps...
new_shard_range = ShardRange(
name='a/c_bad_' + self.shard_data[1]['lower'],
timestamp=next(self.ts_iter),
state=ShardRange.ACTIVE,
lower=self.shard_data[1]['lower'] + 'a',
upper=self.shard_data[1]['upper'])
input_file = os.path.join(self.testdir, 'shards')
with open(input_file, 'w') as fd:
json.dump([dict(new_shard_range)], fd)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, '-v', 'merge', input_file,
'--replace-timeout', '1', '-n'])
self.assertEqual(3, ret)
affected_shard_ranges = [
dict(sr) for sr in [old_shard_ranges[0], old_shard_ranges[1],
new_shard_range, old_shard_ranges[2]]]
expected_msg = [
'This change will result in the following shard ranges in the '
'affected namespace:']
expected_msg.extend(
json.dumps(affected_shard_ranges, indent=2).splitlines())
expected_msg.extend(
['WARNING: this change will result in shard ranges overlaps!',
'No changes applied'])
self.assertEqual(expected_msg, out.getvalue().splitlines())
self.assertEqual(['Loaded db broker for a/c'],
err.getvalue().splitlines())
self.assertEqual(
[dict(sr) for sr in old_shard_ranges],
[dict(sr) for sr in broker.get_shard_ranges()])
# repeat without -v flag
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'merge', input_file,
'--replace-timeout', '1', '-n'])
self.assertEqual(3, ret)
expected_msg = [
'WARNING: this change will result in shard ranges overlaps!',
'No changes applied']
self.assertEqual(expected_msg, out.getvalue().splitlines())
self.assertEqual(['Loaded db broker for a/c'],
err.getvalue().splitlines())
self.assertEqual(
[dict(sr) for sr in old_shard_ranges],
[dict(sr) for sr in broker.get_shard_ranges()])
def test_merge_warns_of_gap(self):
broker = self._make_broker()
broker.update_metadata({'X-Container-Sysmeta-Sharding':
(True, Timestamp.now().internal)})
old_shard_ranges = []
for shard in self.shard_data[:3]:
old_shard_ranges.append(ShardRange(name='a/c_' + shard['lower'],
timestamp=next(self.ts_iter),
state=ShardRange.ACTIVE,
lower=shard['lower'],
upper=shard['upper']))
broker.merge_shard_ranges(old_shard_ranges)
# use command to merge in a deleted range that creates a gap...
new_shard_range = old_shard_ranges[1].copy(
timestamp=next(self.ts_iter), deleted=True)
input_file = os.path.join(self.testdir, 'shards')
with open(input_file, 'w') as fd:
json.dump([dict(new_shard_range)], fd)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, '-v', 'merge', input_file,
'--replace-timeout', '1', '-n'])
self.assertEqual(3, ret)
affected_shard_ranges = [
dict(sr) for sr in [old_shard_ranges[0], old_shard_ranges[2]]]
expected_msg = [
'This change will result in the following shard ranges in the '
'affected namespace:']
expected_msg.extend(
json.dumps(affected_shard_ranges, indent=2).splitlines())
expected_msg.extend(
['WARNING: this change will result in shard ranges gaps!',
'No changes applied'])
self.assertEqual(expected_msg, out.getvalue().splitlines())
self.assertEqual(['Loaded db broker for a/c'],
err.getvalue().splitlines())
self.assertEqual(
[dict(sr) for sr in old_shard_ranges],
[dict(sr) for sr in broker.get_shard_ranges()])
def test_replace(self):
broker = self._make_broker()
broker.update_metadata({'X-Container-Sysmeta-Sharding':
(True, Timestamp.now().internal)})
input_file = os.path.join(self.testdir, 'shards')
with open(input_file, 'w') as fd:
json.dump(self.shard_data, fd)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'replace', input_file])
self.assertEqual(0, ret)
expected = [
'No shard ranges found to delete.',
'Injected 10 shard ranges.',
'Run container-replicator to replicate them to other nodes.',
'Use the enable sub-command to enable sharding.']
self.assertEqual(expected, out.getvalue().splitlines())
self.assertEqual(['Loaded db broker for a/c'],
err.getvalue().splitlines())
self.assertEqual(
[(data['lower'], data['upper']) for data in self.shard_data],
[(sr.lower_str, sr.upper_str) for sr in broker.get_shard_ranges()])
def check_user_exit(user_input):
# try again now db has shard ranges, simulate user quitting
with open(input_file, 'w') as fd:
json.dump(self.overlap_shard_data_1, fd)
out = StringIO()
err = StringIO()
to_patch = 'swift.cli.manage_shard_ranges.input'
with mock.patch('sys.stdout', out), \
mock.patch('sys.stderr', err), \
mock.patch(to_patch, side_effect=[user_input]):
ret = main([broker.db_file, 'replace', input_file])
self.assertEqual(3, ret)
expected = ['This will delete existing 10 shard ranges.']
self.assertEqual(expected, out.getvalue().splitlines())
self.assertEqual(['Loaded db broker for a/c'],
err.getvalue().splitlines())
self.assertEqual(
[(data['lower'], data['upper']) for data in self.shard_data],
[(sr.lower_str, sr.upper_str)
for sr in broker.get_shard_ranges()])
check_user_exit('q')
check_user_exit(EOFError)
check_user_exit(KeyboardInterrupt)
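    # 'analyze' reads shard range JSON from a file, or from stdin when the
    # path argument is '-', and only reports whether a complete,
    # non-overlapping sequence exists; it performs no repairs itself.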
def test_analyze_stdin(self):
out = StringIO()
err = StringIO()
stdin = StringIO()
stdin.write(json.dumps([])) # empty but valid json
stdin.seek(0)
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err), \
mock.patch('sys.stdin', stdin):
ret = main(['-', 'analyze'])
self.assertEqual(1, ret)
expected = [
'Found no complete sequence of shard ranges.',
'Repairs necessary to fill gaps.',
'Gap filling not supported by this tool. No repairs performed.',
]
self.assertEqual(expected, out.getvalue().splitlines())
broker = self._make_broker()
broker.update_metadata({'X-Container-Sysmeta-Sharding':
(True, Timestamp.now().internal)})
shard_ranges = [
dict(sr, state=ShardRange.STATES[sr.state])
for sr in make_shard_ranges(broker, self.shard_data, '.shards_')
]
out = StringIO()
err = StringIO()
stdin = StringIO()
stdin.write(json.dumps(shard_ranges))
stdin.seek(0)
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err), \
mock.patch('sys.stdin', stdin):
ret = main(['-', 'analyze'])
self.assertEqual(0, ret)
expected = [
'Found one complete sequence of 10 shard ranges '
'and no overlapping shard ranges.',
'No repairs necessary.',
]
self.assertEqual(expected, out.getvalue().splitlines())
def test_analyze_stdin_with_overlaps(self):
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
with mock_timestamp_now(next(self.ts_iter)):
shard_ranges = make_shard_ranges(
broker, self.shard_data, '.shards_')
with mock_timestamp_now(next(self.ts_iter)):
overlap_shard_ranges_1 = make_shard_ranges(
broker, self.overlap_shard_data_1, '.shards_')
broker.merge_shard_ranges(shard_ranges + overlap_shard_ranges_1)
shard_ranges = [
dict(sr, state=ShardRange.STATES[sr.state])
for sr in broker.get_shard_ranges()
]
out = StringIO()
err = StringIO()
stdin = StringIO()
stdin.write(json.dumps(shard_ranges))
stdin.seek(0)
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err), \
mock.patch('sys.stdin', stdin):
ret = main(['-', 'analyze'])
self.assertEqual(0, ret)
expected = [
'Repairs necessary to remove overlapping shard ranges.',
'Chosen a complete sequence of 10 shard ranges with '
'current total of 100 object records to accept object records '
'from 10 overlapping donor shard ranges.',
'Once applied to the broker these changes will result in:',
' 10 shard ranges being removed.',
' 10 object records being moved to the chosen shard ranges.',
]
self.assertEqual(expected, out.getvalue().splitlines())
def _assert_enabled(self, broker, epoch):
own_sr = broker.get_own_shard_range()
self.assertEqual(ShardRange.SHARDING, own_sr.state)
self.assertEqual(epoch, own_sr.epoch)
self.assertEqual(ShardRange.MIN, own_sr.lower)
self.assertEqual(ShardRange.MAX, own_sr.upper)
self.assertEqual(
'True', broker.metadata['X-Container-Sysmeta-Sharding'][0])
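    # 'enable' marks the container ready for sharding: the own shard range
    # moves to state 'sharding' with a fresh epoch and
    # X-Container-Sysmeta-Sharding is set to True, which is exactly what
    # _assert_enabled() checks above.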
def test_enable(self):
broker = self._make_broker()
ts = next(self.ts_iter)
broker.merge_items([
{'name': 'obj%02d' % i, 'created_at': ts.internal, 'size': 9,
'content_type': 'application/octet-stream', 'etag': 'not-really',
'deleted': 0, 'storage_policy_index': 0,
'ctype_timestamp': ts.internal, 'meta_timestamp': ts.internal}
for i in range(100)])
broker.update_metadata({'X-Container-Sysmeta-Sharding':
(True, Timestamp.now().internal)})
# no shard ranges
out = StringIO()
err = StringIO()
with self.assertRaises(SystemExit) as cm:
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
main([broker.db_file, 'enable'])
self.assertEqual(1, cm.exception.code)
expected = ["WARNING: invalid shard ranges: ['No shard ranges.'].",
'Aborting.']
self.assertEqual(expected, out.getvalue().splitlines())
self.assertEqual(['Loaded db broker for a/c'],
err.getvalue().splitlines())
# success
shard_ranges = []
for data in self.shard_data:
path = ShardRange.make_path(
'.shards_a', 'c', 'c', Timestamp.now(), data['index'])
shard_ranges.append(
ShardRange(path, Timestamp.now(), data['lower'],
data['upper'], data['object_count'], bytes_used=9))
broker.merge_shard_ranges(shard_ranges)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
with mock_timestamp_now() as now:
ret = main([broker.db_file, 'enable'])
self.assertEqual(0, ret)
expected = [
"Container moved to state 'sharding' with epoch %s." %
now.internal,
'Run container-sharder on all nodes to shard the container.']
self.assertEqual(expected, out.getvalue().splitlines())
self.assertEqual(['Loaded db broker for a/c'],
err.getvalue().splitlines())
self._assert_enabled(broker, now)
self.assertEqual(100, broker.get_info()['object_count'])
self.assertEqual(100, broker.get_own_shard_range().object_count)
self.assertEqual(900, broker.get_info()['bytes_used'])
self.assertEqual(900, broker.get_own_shard_range().bytes_used)
# already enabled
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'enable'])
self.assertEqual(0, ret)
expected = [
"Container already in state 'sharding' with epoch %s." %
now.internal,
'No action required.',
'Run container-sharder on all nodes to shard the container.']
self.assertEqual(expected, out.getvalue().splitlines())
self.assertEqual(['Loaded db broker for a/c'],
err.getvalue().splitlines())
self._assert_enabled(broker, now)
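    # 'find_and_replace <rows-per-shard> --enable' chains the find, replace
    # and enable steps in a single invocation, e.g.
    #   main([db_file, 'find_and_replace', '10', '--enable'])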
def test_find_replace_enable(self):
db_file = os.path.join(self.testdir, 'hash.db')
broker = ContainerBroker(db_file)
broker.account = 'a'
broker.container = 'c'
broker.initialize()
ts = utils.Timestamp.now()
broker.merge_items([
{'name': 'obj%02d' % i, 'created_at': ts.internal, 'size': 0,
'content_type': 'application/octet-stream', 'etag': 'not-really',
'deleted': 0, 'storage_policy_index': 0,
'ctype_timestamp': ts.internal, 'meta_timestamp': ts.internal}
for i in range(100)])
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
with mock_timestamp_now() as now:
ret = main([broker.db_file, 'find_and_replace', '10',
'--enable'])
self.assertEqual(0, ret)
expected = [
'No shard ranges found to delete.',
'Injected 10 shard ranges.',
'Run container-replicator to replicate them to other nodes.',
"Container moved to state 'sharding' with epoch %s." %
now.internal,
'Run container-sharder on all nodes to shard the container.']
self.assertEqual(expected, out.getvalue().splitlines())
self.assertEqual(['Loaded db broker for a/c'],
err.getvalue().splitlines())
self._assert_enabled(broker, now)
found_shard_ranges = broker.get_shard_ranges()
self.assertEqual(
[(data['lower'], data['upper']) for data in self.shard_data],
[(sr.lower_str, sr.upper_str) for sr in found_shard_ranges])
def check_user_exit(user_input):
# Do another find & replace but quit when prompted about existing
# shard ranges
out = StringIO()
err = StringIO()
to_patch = 'swift.cli.manage_shard_ranges.input'
with mock.patch('sys.stdout', out), mock_timestamp_now(), \
mock.patch('sys.stderr', err), \
mock.patch(to_patch, side_effect=[user_input]):
ret = main([broker.db_file, 'find_and_replace', '10'])
self.assertEqual(3, ret)
# Shard ranges haven't changed at all
self.assertEqual(found_shard_ranges, broker.get_shard_ranges())
expected = ['This will delete existing 10 shard ranges.']
self.assertEqual(expected, out.getvalue().splitlines())
self.assertEqual(['Loaded db broker for a/c'],
err.getvalue().splitlines())
check_user_exit('q')
check_user_exit(EOFError)
check_user_exit(KeyboardInterrupt)
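    # 'compact' rejects non-positive values for its numeric options; argparse
    # exits with code 2, e.g.
    #   main([db_file, 'compact', '--shrink-threshold', '0'])  # SystemExit(2)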
def test_compact_bad_args(self):
broker = self._make_broker()
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
with self.assertRaises(SystemExit) as cm:
main([broker.db_file, 'compact', '--shrink-threshold', '0'])
self.assertEqual(2, cm.exception.code)
with self.assertRaises(SystemExit) as cm:
main([broker.db_file, 'compact', '--expansion-limit', '0'])
self.assertEqual(2, cm.exception.code)
with self.assertRaises(SystemExit) as cm:
main([broker.db_file, 'compact', '--max-shrinking', '0'])
self.assertEqual(2, cm.exception.code)
with self.assertRaises(SystemExit) as cm:
main([broker.db_file, 'compact', '--max-expanding', '0'])
self.assertEqual(2, cm.exception.code)
def test_compact_not_root(self):
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
broker.merge_shard_ranges(shard_ranges)
# make broker appear to not be a root container
out = StringIO()
err = StringIO()
broker.set_sharding_sysmeta('Quoted-Root', 'not_a/c')
self.assertFalse(broker.is_root_container())
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact'])
self.assertEqual(1, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['WARNING: Shard containers cannot be compacted.',
'This command should be used on a root container.'],
out_lines[:2]
)
updated_ranges = broker.get_shard_ranges()
self.assertEqual(shard_ranges, updated_ranges)
self.assertEqual([ShardRange.FOUND] * 10,
[sr.state for sr in updated_ranges])
def test_compact_not_sharded(self):
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
broker.merge_shard_ranges(shard_ranges)
# make broker appear to be a root container but it isn't sharded
out = StringIO()
err = StringIO()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
self.assertTrue(broker.is_root_container())
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact'])
self.assertEqual(1, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['WARNING: Container is not yet sharded so cannot be compacted.'],
out_lines[:1])
updated_ranges = broker.get_shard_ranges()
self.assertEqual(shard_ranges, updated_ranges)
self.assertEqual([ShardRange.FOUND] * 10,
[sr.state for sr in updated_ranges])
def test_compact_overlapping_shard_ranges(self):
# verify that containers with overlaps will not be compacted
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
for i, sr in enumerate(shard_ranges):
sr.update_state(ShardRange.ACTIVE)
shard_ranges[3].upper = shard_ranges[4].upper
broker.merge_shard_ranges(shard_ranges)
self._move_broker_to_sharded_state(broker)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file,
'compact', '--yes', '--max-expanding', '10'])
self.assertEqual(1, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['WARNING: Container has overlapping shard ranges so cannot be '
'compacted.'],
out_lines[:1])
updated_ranges = broker.get_shard_ranges()
self.assertEqual(shard_ranges, updated_ranges)
self.assertEqual([ShardRange.ACTIVE] * 10,
[sr.state for sr in updated_ranges])
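    # shard ranges still in the FOUND state are not candidates for shrinking,
    # so compaction reports nothing to do and leaves them unchanged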
def test_compact_shard_ranges_in_found_state(self):
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
broker.merge_shard_ranges(shard_ranges)
self._move_broker_to_sharded_state(broker)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['No shards identified for compaction.'],
out_lines[:1])
updated_ranges = broker.get_shard_ranges()
self.assertEqual([ShardRange.FOUND] * 10,
[sr.state for sr in updated_ranges])
def test_compact_user_input(self):
# verify user input 'yes' or 'n' is respected
small_ranges = (3, 4, 7)
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
for i, sr in enumerate(shard_ranges):
sr.tombstones = 999
if i not in small_ranges:
sr.object_count = 100001
sr.update_state(ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges)
self._move_broker_to_sharded_state(broker)
expected_base = [
'Donor shard range(s) with total of 2018 rows:',
" '.shards_a",
" objects: 10, tombstones: 999, lower: 'obj29'",
" state: active, deleted: 0 upper: 'obj39'",
" '.shards_a",
" objects: 10, tombstones: 999, lower: 'obj39'",
" state: active, deleted: 0 upper: 'obj49'",
'can be compacted into acceptor shard range:',
" '.shards_a",
" objects: 100001, tombstones: 999, lower: 'obj49'",
" state: active, deleted: 0 upper: 'obj59'",
'Donor shard range(s) with total of 1009 rows:',
" '.shards_a",
" objects: 10, tombstones: 999, lower: 'obj69'",
" state: active, deleted: 0 upper: 'obj79'",
'can be compacted into acceptor shard range:',
" '.shards_a",
" objects: 100001, tombstones: 999, lower: 'obj79'",
" state: active, deleted: 0 upper: 'obj89'",
'Total of 2 shard sequences identified for compaction.',
'Once applied to the broker these changes will result in '
'shard range compaction the next time the sharder runs.',
]
def do_compact(user_input, options, exp_changes, exit_code):
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out),\
mock.patch('sys.stderr', err), \
mock.patch('swift.cli.manage_shard_ranges.input',
side_effect=[user_input]):
ret = main([broker.db_file, 'compact',
'--max-shrinking', '99'] + options)
self.assertEqual(exit_code, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
expected = list(expected_base)
if exp_changes:
expected.extend([
'Updated 2 shard sequences for compaction.',
'Run container-replicator to replicate the changes to '
'other nodes.',
'Run container-sharder on all nodes to compact shards.',
'',
])
else:
expected.extend([
'No changes applied',
'',
])
self.assertEqual(expected, [l.split('/', 1)[0] for l in out_lines])
return broker.get_shard_ranges()
broker_ranges = do_compact('n', [], False, 3)
# expect no changes to shard ranges
self.assertEqual(shard_ranges, broker_ranges)
for i, sr in enumerate(broker_ranges):
self.assertEqual(ShardRange.ACTIVE, sr.state)
broker_ranges = do_compact(EOFError, [], False, 3)
# expect no changes to shard ranges
self.assertEqual(shard_ranges, broker_ranges)
for i, sr in enumerate(broker_ranges):
self.assertEqual(ShardRange.ACTIVE, sr.state)
broker_ranges = do_compact(KeyboardInterrupt, [], False, 3)
# expect no changes to shard ranges
self.assertEqual(shard_ranges, broker_ranges)
for i, sr in enumerate(broker_ranges):
self.assertEqual(ShardRange.ACTIVE, sr.state)
broker_ranges = do_compact('yes', ['--dry-run'], False, 3)
# expect no changes to shard ranges
self.assertEqual(shard_ranges, broker_ranges)
for i, sr in enumerate(broker_ranges):
self.assertEqual(ShardRange.ACTIVE, sr.state)
broker_ranges = do_compact('yes', [], True, 0)
# expect updated shard ranges
shard_ranges[5].lower = shard_ranges[3].lower
shard_ranges[8].lower = shard_ranges[7].lower
self.assertEqual(shard_ranges, broker_ranges)
for i, sr in enumerate(broker_ranges):
if i in small_ranges:
self.assertEqual(ShardRange.SHRINKING, sr.state)
else:
self.assertEqual(ShardRange.ACTIVE, sr.state)
def test_compact_four_donors_two_acceptors(self):
small_ranges = (2, 3, 4, 7)
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
for i, sr in enumerate(shard_ranges):
if i not in small_ranges:
sr.object_count = 100001
sr.update_state(ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges)
self._move_broker_to_sharded_state(broker)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact', '--yes',
'--max-shrinking', '99'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertIn('Updated 2 shard sequences for compaction.', out_lines)
updated_ranges = broker.get_shard_ranges()
for i, sr in enumerate(updated_ranges):
if i in small_ranges:
self.assertEqual(ShardRange.SHRINKING, sr.state)
else:
self.assertEqual(ShardRange.ACTIVE, sr.state)
shard_ranges[5].lower = shard_ranges[2].lower
shard_ranges[8].lower = shard_ranges[7].lower
self.assertEqual(shard_ranges, updated_ranges)
for i in (5, 8):
# acceptors should have updated timestamp
self.assertLess(shard_ranges[i].timestamp,
updated_ranges[i].timestamp)
# check idempotency
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact', '--yes',
'--max-shrinking', '99'])
self.assertEqual(0, ret)
updated_ranges = broker.get_shard_ranges()
self.assertEqual(shard_ranges, updated_ranges)
for i, sr in enumerate(updated_ranges):
if i in small_ranges:
self.assertEqual(ShardRange.SHRINKING, sr.state)
else:
self.assertEqual(ShardRange.ACTIVE, sr.state)
def test_compact_all_donors_shrink_to_root(self):
# by default all shard ranges are small enough to shrink so the root
# becomes the acceptor
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
for i, sr in enumerate(shard_ranges):
sr.update_state(ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges)
epoch = self._move_broker_to_sharded_state(broker)
own_sr = broker.get_own_shard_range(no_default=True)
self.assertEqual(epoch, own_sr.state_timestamp) # sanity check
self.assertEqual(ShardRange.SHARDED, own_sr.state) # sanity check
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact', '--yes',
'--max-shrinking', '99'])
self.assertEqual(0, ret, 'stdout:\n%s\nstderr\n%s' %
(out.getvalue(), err.getvalue()))
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertIn('Updated 1 shard sequences for compaction.', out_lines)
updated_ranges = broker.get_shard_ranges()
self.assertEqual(shard_ranges, updated_ranges)
self.assertEqual([ShardRange.SHRINKING] * 10,
[sr.state for sr in updated_ranges])
updated_own_sr = broker.get_own_shard_range(no_default=True)
self.assertEqual(own_sr.timestamp, updated_own_sr.timestamp)
self.assertEqual(own_sr.epoch, updated_own_sr.epoch)
self.assertLess(own_sr.state_timestamp,
updated_own_sr.state_timestamp)
self.assertEqual(ShardRange.ACTIVE, updated_own_sr.state)
def test_compact_single_donor_shrink_to_root(self):
# single shard range small enough to shrink so the root becomes the
# acceptor
broker = self._make_broker()
shard_data = [
{'index': 0, 'lower': '', 'upper': '', 'object_count': 10}
]
shard_ranges = make_shard_ranges(broker, shard_data, '.shards_')
shard_ranges[0].update_state(ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges)
epoch = self._move_broker_to_sharded_state(broker)
own_sr = broker.get_own_shard_range(no_default=True)
self.assertEqual(epoch, own_sr.state_timestamp) # sanity check
self.assertEqual(ShardRange.SHARDED, own_sr.state) # sanity check
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact', '--yes'])
self.assertEqual(0, ret, 'stdout:\n%s\nstderr\n%s' %
(out.getvalue(), err.getvalue()))
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertIn('Updated 1 shard sequences for compaction.', out_lines)
updated_ranges = broker.get_shard_ranges()
self.assertEqual(shard_ranges, updated_ranges)
self.assertEqual([ShardRange.SHRINKING],
[sr.state for sr in updated_ranges])
updated_own_sr = broker.get_own_shard_range(no_default=True)
self.assertEqual(own_sr.timestamp, updated_own_sr.timestamp)
self.assertEqual(own_sr.epoch, updated_own_sr.epoch)
self.assertLess(own_sr.state_timestamp,
updated_own_sr.state_timestamp)
self.assertEqual(ShardRange.ACTIVE, updated_own_sr.state)
def test_compact_donors_but_no_suitable_acceptor(self):
# if shard ranges are already shrinking, check that the final one is
# not made into an acceptor if a suitable adjacent acceptor is not
# found (unexpected scenario but possible in an overlap situation)
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
for i, state in enumerate([ShardRange.SHRINKING] * 3 +
[ShardRange.SHARDING] +
[ShardRange.ACTIVE] * 6):
shard_ranges[i].update_state(state)
broker.merge_shard_ranges(shard_ranges)
epoch = self._move_broker_to_sharded_state(broker)
with mock_timestamp_now(epoch):
own_sr = broker.get_own_shard_range(no_default=True)
self.assertEqual(epoch, own_sr.state_timestamp) # sanity check
self.assertEqual(ShardRange.SHARDED, own_sr.state) # sanity check
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact', '--yes',
'--max-shrinking', '99'])
self.assertEqual(0, ret, 'stdout:\n%s\nstderr\n%s' %
(out.getvalue(), err.getvalue()))
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertIn('Updated 1 shard sequences for compaction.', out_lines)
updated_ranges = broker.get_shard_ranges()
shard_ranges[9].lower = shard_ranges[4].lower # expanded acceptor
self.assertEqual(shard_ranges, updated_ranges)
self.assertEqual([ShardRange.SHRINKING] * 3 + # unchanged
[ShardRange.SHARDING] + # unchanged
[ShardRange.SHRINKING] * 5 + # moved to shrinking
[ShardRange.ACTIVE], # unchanged
[sr.state for sr in updated_ranges])
with mock_timestamp_now(epoch): # force equal meta-timestamp
updated_own_sr = broker.get_own_shard_range(no_default=True)
self.assertEqual(dict(own_sr), dict(updated_own_sr))
def test_compact_no_gaps(self):
# verify that compactible sequences do not include gaps
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
for i, sr in enumerate(shard_ranges):
sr.update_state(ShardRange.ACTIVE)
gapped_ranges = shard_ranges[:3] + shard_ranges[4:]
broker.merge_shard_ranges(gapped_ranges)
self._move_broker_to_sharded_state(broker)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact', '--yes',
'--max-shrinking', '99'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertIn('Updated 2 shard sequences for compaction.', out_lines)
updated_ranges = broker.get_shard_ranges()
gapped_ranges[2].lower = gapped_ranges[0].lower
gapped_ranges[8].lower = gapped_ranges[3].lower
self.assertEqual(gapped_ranges, updated_ranges)
self.assertEqual([ShardRange.SHRINKING] * 2 + [ShardRange.ACTIVE] +
[ShardRange.SHRINKING] * 5 + [ShardRange.ACTIVE],
[sr.state for sr in updated_ranges])
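    # without '--max-shrinking' only one donor shrinks into each acceptor per
    # pass, giving the alternating SHRINKING/ACTIVE pattern asserted below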
def test_compact_max_shrinking_default(self):
# verify default limit on number of shrinking shards per acceptor
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
for i, sr in enumerate(shard_ranges):
sr.update_state(ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges)
self._move_broker_to_sharded_state(broker)
def do_compact(expect_msg):
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact', '--yes'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertIn(expect_msg, out_lines)
return broker.get_shard_ranges()
updated_ranges = do_compact(
'Updated 5 shard sequences for compaction.')
for acceptor in (1, 3, 5, 7, 9):
shard_ranges[acceptor].lower = shard_ranges[acceptor - 1].lower
self.assertEqual(shard_ranges, updated_ranges)
self.assertEqual([ShardRange.SHRINKING, ShardRange.ACTIVE] * 5,
[sr.state for sr in updated_ranges])
# check idempotency
updated_ranges = do_compact('No shards identified for compaction.')
self.assertEqual(shard_ranges, updated_ranges)
self.assertEqual([ShardRange.SHRINKING, ShardRange.ACTIVE] * 5,
[sr.state for sr in updated_ranges])
def test_compact_max_shrinking(self):
# verify option to limit the number of shrinking shards per acceptor
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
for i, sr in enumerate(shard_ranges):
sr.update_state(ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges)
self._move_broker_to_sharded_state(broker)
def do_compact(expect_msg):
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact', '--yes',
'--max-shrinking', '7'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertIn(expect_msg, out_lines)
return broker.get_shard_ranges()
updated_ranges = do_compact(
'Updated 2 shard sequences for compaction.')
shard_ranges[7].lower = shard_ranges[0].lower
shard_ranges[9].lower = shard_ranges[8].lower
self.assertEqual(shard_ranges, updated_ranges)
self.assertEqual([ShardRange.SHRINKING] * 7 + [ShardRange.ACTIVE] +
[ShardRange.SHRINKING] + [ShardRange.ACTIVE],
[sr.state for sr in updated_ranges])
# check idempotency
updated_ranges = do_compact('No shards identified for compaction.')
self.assertEqual(shard_ranges, updated_ranges)
self.assertEqual([ShardRange.SHRINKING] * 7 + [ShardRange.ACTIVE] +
[ShardRange.SHRINKING] + [ShardRange.ACTIVE],
[sr.state for sr in updated_ranges])
def test_compact_max_expanding(self):
# verify option to limit the number of expanding shards per acceptor
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
for i, sr in enumerate(shard_ranges):
sr.update_state(ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges)
self._move_broker_to_sharded_state(broker)
def do_compact(expect_msg):
out = StringIO()
err = StringIO()
# note: max_shrinking is set to 3 so that there is opportunity for
# more than 2 acceptors
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact', '--yes',
'--max-shrinking', '3', '--max-expanding', '2'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertIn(expect_msg, out_lines)
return broker.get_shard_ranges()
updated_ranges = do_compact(
'Updated 2 shard sequences for compaction.')
shard_ranges[3].lower = shard_ranges[0].lower
shard_ranges[7].lower = shard_ranges[4].lower
self.assertEqual(shard_ranges, updated_ranges)
self.assertEqual([ShardRange.SHRINKING] * 3 + [ShardRange.ACTIVE] +
[ShardRange.SHRINKING] * 3 + [ShardRange.ACTIVE] * 3,
[sr.state for sr in updated_ranges])
# check idempotency - no more sequences found while existing sequences
# are shrinking
updated_ranges = do_compact('No shards identified for compaction.')
self.assertEqual(shard_ranges, updated_ranges)
self.assertEqual([ShardRange.SHRINKING] * 3 + [ShardRange.ACTIVE] +
[ShardRange.SHRINKING] * 3 + [ShardRange.ACTIVE] * 3,
[sr.state for sr in updated_ranges])
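    # '--expansion-limit' caps the projected object count of an acceptor
    # after absorbing donors; with a limit of 20 and 10-object shards each
    # acceptor can absorb exactly one donor, as asserted below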
def test_compact_expansion_limit(self):
# verify option to limit the size of each acceptor after compaction
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
for i, sr in enumerate(shard_ranges):
sr.update_state(ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges)
self._move_broker_to_sharded_state(broker)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact', '--yes',
'--expansion-limit', '20'])
self.assertEqual(0, ret, err.getvalue())
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().rstrip('\n').split('\n')
self.assertIn('Updated 5 shard sequences for compaction.', out_lines)
updated_ranges = broker.get_shard_ranges()
shard_ranges[1].lower = shard_ranges[0].lower
shard_ranges[3].lower = shard_ranges[2].lower
shard_ranges[5].lower = shard_ranges[4].lower
shard_ranges[7].lower = shard_ranges[6].lower
shard_ranges[9].lower = shard_ranges[8].lower
self.assertEqual(shard_ranges, updated_ranges)
self.assertEqual([ShardRange.SHRINKING] + [ShardRange.ACTIVE] +
[ShardRange.SHRINKING] + [ShardRange.ACTIVE] +
[ShardRange.SHRINKING] + [ShardRange.ACTIVE] +
[ShardRange.SHRINKING] + [ShardRange.ACTIVE] +
[ShardRange.SHRINKING] + [ShardRange.ACTIVE],
[sr.state for sr in updated_ranges])
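    # with '--expansion-limit' set below '--shrink-threshold', eligible
    # donors cannot be absorbed without the acceptor exceeding the limit,
    # so nothing is compacted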
def test_compact_expansion_limit_less_than_shrink_threshold(self):
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
for i, sr in enumerate(shard_ranges):
if i % 2:
sr.object_count = 25
else:
sr.object_count = 3
sr.update_state(ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges)
self._move_broker_to_sharded_state(broker)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact', '--yes',
'--shrink-threshold', '10',
'--expansion-limit', '5'])
self.assertEqual(0, ret)
out_lines = out.getvalue().split('\n')
self.assertEqual(
['No shards identified for compaction.'],
out_lines[:1])
def test_compact_nothing_to_do(self):
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
for i, sr in enumerate(shard_ranges):
sr.update_state(ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges)
self._move_broker_to_sharded_state(broker)
out = StringIO()
err = StringIO()
# all shards are too big to shrink
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact', '--yes',
'--shrink-threshold', '5',
'--expansion-limit', '8'])
self.assertEqual(0, ret)
out_lines = out.getvalue().split('\n')
self.assertEqual(
['No shards identified for compaction.'],
out_lines[:1])
# all shards could shrink but acceptors would be too large
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact', '--yes',
'--shrink-threshold', '11',
'--expansion-limit', '12'])
self.assertEqual(0, ret)
out_lines = out.getvalue().split('\n')
self.assertEqual(
['No shards identified for compaction.'],
out_lines[:1])
def _do_test_compact_shrink_threshold(self, broker, shard_ranges):
# verify option to set the shrink threshold for compaction;
for i, sr in enumerate(shard_ranges):
sr.update_state(ShardRange.ACTIVE)
# (n-2)th shard range has one extra object
shard_ranges[-2].object_count = shard_ranges[-2].object_count + 1
broker.merge_shard_ranges(shard_ranges)
self._move_broker_to_sharded_state(broker)
# with threshold set to 10 no shard ranges can be shrunk
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact', '--yes',
'--max-shrinking', '99',
'--shrink-threshold', '10'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['No shards identified for compaction.'],
out_lines[:1])
updated_ranges = broker.get_shard_ranges()
self.assertEqual(shard_ranges, updated_ranges)
self.assertEqual([ShardRange.ACTIVE] * 10,
[sr.state for sr in updated_ranges])
# with threshold == 11 all but the final 2 shard ranges can be shrunk;
# note: the (n-1)th shard range is NOT shrunk to root
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'compact', '--yes',
'--max-shrinking', '99',
'--shrink-threshold', '11'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertIn('Updated 1 shard sequences for compaction.', out_lines)
updated_ranges = broker.get_shard_ranges()
shard_ranges[8].lower = shard_ranges[0].lower
self.assertEqual(shard_ranges, updated_ranges)
self.assertEqual([ShardRange.SHRINKING] * 8 + [ShardRange.ACTIVE] * 2,
[sr.state for sr in updated_ranges])
def test_compact_shrink_threshold(self):
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
self._do_test_compact_shrink_threshold(broker, shard_ranges)
def test_compact_shrink_threshold_with_tombstones(self):
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
for i, sr in enumerate(shard_ranges):
sr.object_count = sr.object_count - i
sr.tombstones = i
self._do_test_compact_shrink_threshold(broker, shard_ranges)
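    # The 'repair' subcommand resolves overlapping shard ranges on a root
    # container by shrinking overlapping donors into a chosen complete
    # sequence; with '--gaps' it instead expands a neighboring range to
    # fill any gaps. Typical invocations, as exercised below:
    #   main([db_file, 'repair', '--yes', '--min-shard-age', '0'])
    #   main([db_file, 'repair', '--gaps', '--yes'])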
def test_repair_not_root(self):
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
broker.merge_shard_ranges(shard_ranges)
# make broker appear to not be a root container
out = StringIO()
err = StringIO()
broker.set_sharding_sysmeta('Quoted-Root', 'not_a/c')
self.assertFalse(broker.is_root_container())
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'repair'])
self.assertEqual(1, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['WARNING: Shard containers cannot be repaired.',
'This command should be used on a root container.'],
out_lines[:2]
)
updated_ranges = broker.get_shard_ranges()
self.assert_shard_ranges_equal(shard_ranges, updated_ranges)
def test_repair_no_shard_ranges(self):
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
self.assertTrue(broker.is_root_container())
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'repair'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['No shards found, nothing to do.'],
out_lines[:1])
updated_ranges = broker.get_shard_ranges()
self.assert_shard_ranges_equal([], updated_ranges)
def test_repair_one_incomplete_sequence(self):
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
with mock_timestamp_now(next(self.ts_iter)):
shard_ranges = make_shard_ranges(
broker, self.shard_data[:-1], '.shards_')
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.is_root_container())
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'repair'])
self.assertEqual(1, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['Found no complete sequence of shard ranges.'],
out_lines[:1])
updated_ranges = broker.get_shard_ranges()
self.assert_shard_ranges_equal(shard_ranges, updated_ranges)
def test_repair_overlapping_incomplete_sequences(self):
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
with mock_timestamp_now(next(self.ts_iter)):
shard_ranges = make_shard_ranges(
broker, self.shard_data[:-1], '.shards_')
with mock_timestamp_now(next(self.ts_iter)):
# use new time to get distinct shard names
overlap_shard_ranges = make_shard_ranges(
broker,
self.overlap_shard_data_1[:2] + self.overlap_shard_data_1[6:],
'.shards_')
broker.merge_shard_ranges(shard_ranges + overlap_shard_ranges)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'repair'])
self.assertEqual(1, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['Found no complete sequence of shard ranges.'],
out_lines[:1])
updated_ranges = broker.get_shard_ranges()
expected = sorted(shard_ranges + overlap_shard_ranges,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
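    # a gap arises when a range is missing or is in a shrinking state; gap
    # repair expands an adjacent neighbor range to cover the missing span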
def test_repair_gaps(self):
def do_test(missing_index, expander_index, missing_state=None):
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
for shard in self.shard_data:
shard['state'] = ShardRange.ACTIVE
with mock_timestamp_now(next(self.ts_iter)):
all_shard_ranges = make_shard_ranges(
broker, self.shard_data, '.shards_')
shard_ranges = list(all_shard_ranges)
if missing_state is None:
missing_range = shard_ranges.pop(missing_index)
exp_gap_contents = []
else:
missing_range = shard_ranges[missing_index]
missing_range.state = missing_state
exp_gap_contents = [
" '%s'" % missing_range.name, mock.ANY, mock.ANY]
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.is_root_container())
out = StringIO()
err = StringIO()
with mock_timestamp_now(next(self.ts_iter)) as ts_now, \
mock.patch('sys.stdout', out), \
mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'repair', '--gaps', '--yes'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
expander = all_shard_ranges[expander_index]
if missing_index < expander_index:
expander.lower = missing_range.lower
else:
expander.upper = missing_range.upper
expander.state_timestamp = expander.timestamp
expander.meta_timestamp = expander.timestamp
expander.timestamp = ts_now
self.assertEqual(
['Found 1 gaps:',
' gap: %r - %r' % (missing_range.lower, missing_range.upper),
' apparent gap contents:']
+ exp_gap_contents +
[' gap can be fixed by expanding neighbor range:',
" '%s'" % expander.name] +
[mock.ANY] * 2 +
['',
'Repairs necessary to fill gaps.',
'The following expanded shard range(s) will be applied to '
'the DB:',
" '%s'" % expander.name] +
[mock.ANY] * 2 +
['',
'It is recommended that no other concurrent changes are made '
'to the ',
'shard ranges while fixing gaps. If necessary, abort '
'this change ',
'and stop any auto-sharding processes before repeating '
'this command.',
'',
'Run container-replicator to replicate the changes to '
'other nodes.',
'Run container-sharder on all nodes to fill gaps.',
''],
out_lines)
updated_ranges = broker.get_shard_ranges()
self.assert_shard_ranges_equal(shard_ranges, updated_ranges)
os.remove(broker.db_file)
for i in range(len(self.shard_data) - 1):
do_test(i, i + 1)
do_test(len(self.shard_data) - 1, len(self.shard_data) - 2)
for i in range(len(self.shard_data) - 1):
do_test(i, i + 1, ShardRange.SHRINKING)
do_test(len(self.shard_data) - 1, len(self.shard_data) - 2,
ShardRange.SHRINKING)
def test_repair_gaps_multiple_missing(self):
def do_test(broker, max_expanding):
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
states = [
ShardRange.ACTIVE,
ShardRange.SHRINKING,
ShardRange.SHRUNK,
ShardRange.ACTIVE,
ShardRange.SHRUNK,
ShardRange.SHRINKING,
ShardRange.ACTIVE,
ShardRange.SHRINKING,
ShardRange.SHRUNK,
ShardRange.SHARDED,
]
for i, shard in enumerate(self.shard_data):
shard['state'] = states[i]
if states[i] in (ShardRange.SHRUNK, ShardRange.SHARDED):
shard['deleted'] = 1
with mock_timestamp_now(next(self.ts_iter)):
shard_ranges = make_shard_ranges(
broker, self.shard_data, '.shards_')
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.is_root_container())
orig_shard_ranges = broker.get_shard_ranges(include_deleted=True)
out = StringIO()
err = StringIO()
args = [broker.db_file, 'repair', '--gaps', '--yes']
if max_expanding is not None:
args.extend(['--max-expanding', str(max_expanding)])
with mock_timestamp_now(next(self.ts_iter)) as ts_now, \
mock.patch('sys.stdout', out), \
mock.patch('sys.stderr', err):
ret = main(args)
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
os.remove(broker.db_file)
return orig_shard_ranges, out_lines, ts_now
# max-expanding 1
broker = self._make_broker()
orig_shard_ranges, out_lines, ts_now = do_test(broker, 1)
orig_shard_ranges[3].timestamp = ts_now
orig_shard_ranges[3].lower = orig_shard_ranges[1].lower
self.assertEqual(
['Found 3 gaps:',
' gap: %r - %r' % (orig_shard_ranges[1].lower,
orig_shard_ranges[2].upper),
' apparent gap contents:']
+ [mock.ANY] * 6 +
[' gap can be fixed by expanding neighbor range:',
" '%s'" % orig_shard_ranges[3].name] +
[mock.ANY] * 2 +
[' gap: %r - %r' % (orig_shard_ranges[4].lower,
orig_shard_ranges[5].upper),
' apparent gap contents:'] +
[mock.ANY] * 6 +
[' gap can be fixed by expanding neighbor range:',
" '%s'" % orig_shard_ranges[6].name] +
[mock.ANY] * 2 +
[' gap: %r - %r' % (orig_shard_ranges[7].lower,
orig_shard_ranges[9].upper),
' apparent gap contents:'] +
[mock.ANY] * 9 +
[' gap can be fixed by expanding neighbor range:',
" '%s'" % orig_shard_ranges[6].name] +
[mock.ANY] * 2 +
['',
'Repairs necessary to fill gaps.',
'The following expanded shard range(s) will be applied to the '
'DB:',
" '%s'" % orig_shard_ranges[3].name] +
[mock.ANY] * 6 +
['',
'Run container-replicator to replicate the changes to '
'other nodes.',
'Run container-sharder on all nodes to fill gaps.',
''],
out_lines)
updated_ranges = broker.get_shard_ranges(include_deleted=True)
self.assert_shard_ranges_equal(
sorted(orig_shard_ranges, key=lambda s: s.name),
sorted(updated_ranges, key=lambda s: s.name))
# max-expanding 2
broker = self._make_broker()
orig_shard_ranges, out_lines, ts_now = do_test(broker, 2)
orig_shard_ranges[3].timestamp = ts_now
orig_shard_ranges[3].lower = orig_shard_ranges[1].lower
orig_shard_ranges[6].timestamp = ts_now
orig_shard_ranges[6].lower = orig_shard_ranges[4].lower
self.assertEqual(
['Found 3 gaps:',
' gap: %r - %r' % (orig_shard_ranges[1].lower,
orig_shard_ranges[2].upper),
' apparent gap contents:'] +
[mock.ANY] * 6 +
[' gap can be fixed by expanding neighbor range:',
" '%s'" % orig_shard_ranges[3].name] +
[mock.ANY] * 2 +
[' gap: %r - %r' % (orig_shard_ranges[4].lower,
orig_shard_ranges[5].upper),
' apparent gap contents:'] +
[mock.ANY] * 6 +
[' gap can be fixed by expanding neighbor range:',
" '%s'" % orig_shard_ranges[6].name] +
[mock.ANY] * 2 +
[' gap: %r - %r' % (orig_shard_ranges[7].lower,
orig_shard_ranges[9].upper),
' apparent gap contents:'] +
[mock.ANY] * 9 +
[' gap can be fixed by expanding neighbor range:',
" '%s'" % orig_shard_ranges[6].name] +
[mock.ANY] * 2 +
['',
'Repairs necessary to fill gaps.',
'The following expanded shard range(s) will be applied to the '
'DB:',
" '%s'" % orig_shard_ranges[3].name] +
[mock.ANY] * 2 +
[" '%s'" % orig_shard_ranges[6].name] +
[mock.ANY] * 6 +
['',
'Run container-replicator to replicate the changes to '
'other nodes.',
'Run container-sharder on all nodes to fill gaps.',
''],
out_lines)
updated_ranges = broker.get_shard_ranges(include_deleted=True)
self.assert_shard_ranges_equal(
sorted(orig_shard_ranges, key=lambda s: s.name),
sorted(updated_ranges, key=lambda s: s.name))
# max-expanding unlimited
broker = self._make_broker()
orig_shard_ranges, out_lines, ts_now = do_test(broker, None)
orig_shard_ranges[3].timestamp = ts_now
orig_shard_ranges[3].lower = orig_shard_ranges[1].lower
orig_shard_ranges[6].timestamp = ts_now
orig_shard_ranges[6].lower = orig_shard_ranges[4].lower
orig_shard_ranges[6].upper = orig_shard_ranges[9].upper
self.assertEqual(
['Found 3 gaps:',
' gap: %r - %r' % (orig_shard_ranges[1].lower,
orig_shard_ranges[2].upper),
' apparent gap contents:'] +
[mock.ANY] * 6 +
[' gap can be fixed by expanding neighbor range:',
" '%s'" % orig_shard_ranges[3].name] +
[mock.ANY] * 2 +
[' gap: %r - %r' % (orig_shard_ranges[4].lower,
orig_shard_ranges[5].upper),
' apparent gap contents:'] +
[mock.ANY] * 6 +
[' gap can be fixed by expanding neighbor range:',
" '%s'" % orig_shard_ranges[6].name] +
[mock.ANY] * 2 +
[' gap: %r - %r' % (orig_shard_ranges[7].lower,
orig_shard_ranges[9].upper),
' apparent gap contents:'] +
[mock.ANY] * 9 +
[' gap can be fixed by expanding neighbor range:',
" '%s'" % orig_shard_ranges[6].name] +
[mock.ANY] * 2 +
['',
'Repairs necessary to fill gaps.',
'The following expanded shard range(s) will be applied to the '
'DB:',
" '%s'" % orig_shard_ranges[3].name] +
[mock.ANY] * 2 +
[" '%s'" % orig_shard_ranges[6].name] +
[mock.ANY] * 6 +
['',
'Run container-replicator to replicate the changes to '
'other nodes.',
'Run container-sharder on all nodes to fill gaps.',
''],
out_lines)
updated_ranges = broker.get_shard_ranges(include_deleted=True)
self.assert_shard_ranges_equal(
sorted(orig_shard_ranges, key=lambda s: s.name),
sorted(updated_ranges, key=lambda s: s.name))
def test_repair_gaps_complete_sequence(self):
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
for shard in self.shard_data:
shard['state'] = ShardRange.ACTIVE
with mock_timestamp_now(next(self.ts_iter)):
shard_ranges = make_shard_ranges(
broker, self.shard_data, '.shards_')
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.is_root_container())
out = StringIO()
err = StringIO()
with mock_timestamp_now(next(self.ts_iter)), \
mock.patch('sys.stdout', out), \
mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'repair', '--gaps', '--yes'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['Found one complete sequence of %d shard ranges with no gaps.'
% len(self.shard_data),
'No repairs necessary.'], out_lines[:2])
updated_ranges = broker.get_shard_ranges()
self.assert_shard_ranges_equal(shard_ranges, updated_ranges)
def test_repair_gaps_with_overlap(self):
# verify that overlaps don't look like gaps
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
for shard in self.shard_data:
shard['state'] = ShardRange.ACTIVE
with mock_timestamp_now(next(self.ts_iter)):
shard_ranges = make_shard_ranges(
broker, self.shard_data, '.shards_')
# create a gap
shard_ranges[3].state = ShardRange.SHRINKING
# create an overlap
shard_ranges[5].lower = 'obj45'
self.assertLess(shard_ranges[5].lower, shard_ranges[4].upper)
broker.merge_shard_ranges(shard_ranges)
orig_shard_ranges = broker.get_shard_ranges()
self.assertTrue(broker.is_root_container())
out = StringIO()
err = StringIO()
with mock_timestamp_now(next(self.ts_iter)) as ts_now, \
mock.patch('sys.stdout', out), \
mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'repair', '--gaps', '--yes'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['Found 1 gaps:',
' gap: %r - %r' % (shard_ranges[3].lower,
shard_ranges[3].upper),
' apparent gap contents:'] +
[mock.ANY] * 3 +
[' gap can be fixed by expanding neighbor range:',
" '%s'" % shard_ranges[4].name] +
[mock.ANY] * 2 +
['',
'Repairs necessary to fill gaps.',
'The following expanded shard range(s) will be applied to the '
'DB:',
" '%s'" % shard_ranges[4].name] +
[mock.ANY] * 6 +
['',
'Run container-replicator to replicate the changes to '
'other nodes.',
'Run container-sharder on all nodes to fill gaps.',
''],
out_lines)
orig_shard_ranges[4].lower = shard_ranges[3].lower
orig_shard_ranges[4].timestamp = ts_now
updated_ranges = broker.get_shard_ranges()
self.assert_shard_ranges_equal(orig_shard_ranges, updated_ranges)
def test_repair_gaps_not_root(self):
broker = self._make_broker()
shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
broker.merge_shard_ranges(shard_ranges)
# make broker appear to not be a root container
out = StringIO()
err = StringIO()
broker.set_sharding_sysmeta('Quoted-Root', 'not_a/c')
self.assertFalse(broker.is_root_container())
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'repair', '--gaps'])
self.assertEqual(1, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['WARNING: Shard containers cannot be repaired.',
'This command should be used on a root container.'],
out_lines[:2]
)
updated_ranges = broker.get_shard_ranges()
self.assert_shard_ranges_equal(shard_ranges, updated_ranges)
def test_repair_not_needed(self):
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
shard_ranges = make_shard_ranges(
broker, self.shard_data, '.shards_')
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.is_root_container())
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'repair'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['Found one complete sequence of 10 shard ranges and no '
'overlapping shard ranges.',
'No repairs necessary.'],
out_lines[:2])
updated_ranges = broker.get_shard_ranges()
self.assert_shard_ranges_equal(shard_ranges, updated_ranges)
def _do_test_repair_exits_if_undesirable_state(self, undesirable_state):
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
with mock_timestamp_now(next(self.ts_iter)):
shard_ranges = make_shard_ranges(
broker, self.shard_data, '.shards_')
        # put one shard range into an undesirable state
shard_ranges[2].update_state(undesirable_state)
with mock_timestamp_now(next(self.ts_iter)):
overlap_shard_ranges_2 = make_shard_ranges(
broker, self.overlap_shard_data_2, '.shards_')
broker.merge_shard_ranges(shard_ranges + overlap_shard_ranges_2)
self.assertTrue(broker.is_root_container())
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), \
mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'repair'])
self.assertEqual(1, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['WARNING: Found shard ranges in %s state'
% ShardRange.STATES[undesirable_state]], out_lines[:1])
# nothing changed in DB
self.assert_shard_ranges_equal(
sorted(shard_ranges + overlap_shard_ranges_2,
key=ShardRange.sort_key),
broker.get_shard_ranges())
def test_repair_exits_if_sharding_state(self):
self._do_test_repair_exits_if_undesirable_state(ShardRange.SHARDING)
def test_repair_exits_if_shrinking_state(self):
self._do_test_repair_exits_if_undesirable_state(ShardRange.SHRINKING)
def test_repair_one_complete_sequences_one_incomplete(self):
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
with mock_timestamp_now(next(self.ts_iter)):
shard_ranges = make_shard_ranges(
broker, self.shard_data, '.shards_')
with mock_timestamp_now(next(self.ts_iter)):
overlap_shard_ranges_2 = make_shard_ranges(
broker, self.overlap_shard_data_2, '.shards_')
broker.merge_shard_ranges(shard_ranges + overlap_shard_ranges_2)
self.assertTrue(broker.is_root_container())
def do_repair(user_input, ts_now, options, exit_code):
options = options if options else []
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), \
mock.patch('sys.stderr', err), \
mock_timestamp_now(ts_now), \
mock.patch('swift.cli.manage_shard_ranges.input',
side_effect=[user_input]):
ret = main(
[broker.db_file, 'repair', '--min-shard-age', '0'] +
options)
self.assertEqual(exit_code, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['Repairs necessary to remove overlapping shard ranges.'],
out_lines[:1])
# user input 'n'
ts_now = next(self.ts_iter)
do_repair('n', ts_now, [], 3)
updated_ranges = broker.get_shard_ranges()
expected = sorted(
shard_ranges + overlap_shard_ranges_2,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
ts_now = next(self.ts_iter)
do_repair(EOFError, ts_now, [], 3)
updated_ranges = broker.get_shard_ranges()
self.assert_shard_ranges_equal(expected, updated_ranges)
ts_now = next(self.ts_iter)
do_repair(KeyboardInterrupt, ts_now, [], 3)
updated_ranges = broker.get_shard_ranges()
self.assert_shard_ranges_equal(expected, updated_ranges)
# --dry-run
ts_now = next(self.ts_iter)
do_repair('y', ts_now, ['--dry-run'], 3)
updated_ranges = broker.get_shard_ranges()
expected = sorted(
shard_ranges + overlap_shard_ranges_2,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
        # '-n' (alias of --dry-run)
ts_now = next(self.ts_iter)
do_repair('y', ts_now, ['-n'], 3)
updated_ranges = broker.get_shard_ranges()
expected = sorted(
shard_ranges + overlap_shard_ranges_2,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
# user input 'yes'
ts_now = next(self.ts_iter)
do_repair('yes', ts_now, [], 0)
updated_ranges = broker.get_shard_ranges()
for sr in overlap_shard_ranges_2:
sr.update_state(ShardRange.SHRINKING, ts_now)
sr.epoch = ts_now
expected = sorted(
shard_ranges + overlap_shard_ranges_2,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
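    # '--min-shard-age' guards against repairing recently created ranges:
    # overlaps involving shard ranges younger than the limit are ignored.
    # The default limit is exercised in the 'default' test further below.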
def test_repair_younger_overlapping_donor_shards(self):
        # test shard range repair on normal acceptor ranges and overlapping
        # donor shard ranges that are younger than '--min-shard-age';
        # expect the young donors not to be repaired.
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
ts_now = next(self.ts_iter)
with mock_timestamp_now(Timestamp(float(ts_now) - 61)):
acceptor_ranges = make_shard_ranges(
broker, self.shard_data, '.shards_')
with mock_timestamp_now(ts_now):
overlap_donor_ranges = make_shard_ranges(
broker, self.overlap_shard_data_2, '.shards_')
broker.merge_shard_ranges(acceptor_ranges + overlap_donor_ranges)
self.assertTrue(broker.is_root_container())
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main(
[broker.db_file, 'repair', '--min-shard-age', '60', '-y'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['2 overlapping donor shards ignored due to minimum age limit'],
out_lines[:1])
updated_ranges = broker.get_shard_ranges()
expected = sorted(
acceptor_ranges + overlap_donor_ranges,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
def test_repair_younger_acceptor_with_overlapping_donor_shards(self):
        # test shard range repair on overlapping normal donor ranges and
        # acceptor shard ranges that are younger than '--min-shard-age';
        # expect no overlapping ranges to be repaired.
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
ts_now = next(self.ts_iter)
with mock_timestamp_now(Timestamp(float(ts_now) + 3601)):
acceptor_ranges = make_shard_ranges(
broker, self.shard_data, '.shards_')
with mock_timestamp_now(ts_now):
overlap_donor_ranges = make_shard_ranges(
broker, self.overlap_shard_data_2, '.shards_')
broker.merge_shard_ranges(acceptor_ranges + overlap_donor_ranges)
self.assertTrue(broker.is_root_container())
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), \
mock.patch('sys.stderr', err), \
mock_timestamp_now(Timestamp(float(ts_now) + 3601 + 59)):
ret = main(
[broker.db_file, 'repair', '--min-shard-age', '60', '-y'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['2 donor shards ignored due to existence of overlapping young'
' acceptors'], out_lines[:1])
updated_ranges = broker.get_shard_ranges()
expected = sorted(
acceptor_ranges + overlap_donor_ranges,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
def test_repair_older_overlapping_donor_and_acceptor_shards(self):
        # test shard range repair on overlapping donor and acceptor shard
        # ranges that are all older than '--min-shard-age'; expect them to
        # be repaired.
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
ts_now = next(self.ts_iter)
with mock_timestamp_now(ts_now):
acceptor_ranges = make_shard_ranges(
broker, self.shard_data, '.shards_')
with mock_timestamp_now(Timestamp(float(ts_now) + 1800)):
overlap_donor_ranges = make_shard_ranges(
broker, self.overlap_shard_data_2, '.shards_')
broker.merge_shard_ranges(acceptor_ranges + overlap_donor_ranges)
self.assertTrue(broker.is_root_container())
out = StringIO()
err = StringIO()
ts_1hr_after = Timestamp(float(ts_now) + 3601)
with mock.patch('sys.stdout', out), \
mock.patch('sys.stderr', err), \
mock_timestamp_now(ts_1hr_after):
ret = main(
[broker.db_file, 'repair', '--min-shard-age', '60', '-y'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['Repairs necessary to remove overlapping shard ranges.'],
out_lines[:1])
updated_ranges = broker.get_shard_ranges()
for sr in overlap_donor_ranges:
sr.update_state(ShardRange.SHRINKING, ts_1hr_after)
sr.epoch = ts_1hr_after
expected = sorted(
acceptor_ranges + overlap_donor_ranges,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
def test_repair_overlapping_donor_and_acceptor_shards_default(self):
        # test shard range repair on overlapping donor and acceptor shard
        # ranges with the default '--min-shard-age' value.
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
ts_now = next(self.ts_iter)
with mock_timestamp_now(ts_now):
acceptor_ranges = make_shard_ranges(
broker, self.shard_data, '.shards_')
with mock_timestamp_now(Timestamp(int(ts_now) + 1)):
overlap_donor_ranges = make_shard_ranges(
broker, self.overlap_shard_data_2, '.shards_')
broker.merge_shard_ranges(acceptor_ranges + overlap_donor_ranges)
self.assertTrue(broker.is_root_container())
out = StringIO()
err = StringIO()
ts_repair = Timestamp(int(ts_now) + 4 * 3600 - 1)
with mock.patch('sys.stdout', out), \
mock.patch('sys.stderr', err), \
mock_timestamp_now(ts_repair):
# default min-shard-age prevents repair...
ret = main([broker.db_file, 'repair', '-y'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['2 overlapping donor shards ignored due to minimum age limit'],
out_lines[:1])
updated_ranges = broker.get_shard_ranges()
expected = sorted(
acceptor_ranges + overlap_donor_ranges,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
out = StringIO()
err = StringIO()
ts_repair = Timestamp(int(ts_now) + 4 * 3600 + 2)
with mock.patch('sys.stdout', out), \
mock.patch('sys.stderr', err), \
mock_timestamp_now(ts_repair):
# default min-shard-age allows repair now...
ret = main([broker.db_file, 'repair', '-y'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['Repairs necessary to remove overlapping shard ranges.'],
out_lines[:1])
updated_ranges = broker.get_shard_ranges()
for sr in overlap_donor_ranges:
sr.update_state(ShardRange.SHRINKING, ts_repair)
sr.epoch = ts_repair
expected = sorted(
acceptor_ranges + overlap_donor_ranges,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
def test_repair_two_complete_sequences_one_incomplete(self):
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
with mock_timestamp_now(next(self.ts_iter)):
shard_ranges = make_shard_ranges(
broker, self.shard_data, '.shards_')
with mock_timestamp_now(next(self.ts_iter)):
overlap_shard_ranges_1 = make_shard_ranges(
broker, self.overlap_shard_data_1, '.shards_')
with mock_timestamp_now(next(self.ts_iter)):
overlap_shard_ranges_2 = make_shard_ranges(
broker, self.overlap_shard_data_2, '.shards_')
broker.merge_shard_ranges(shard_ranges + overlap_shard_ranges_1 +
overlap_shard_ranges_2)
self.assertTrue(broker.is_root_container())
out = StringIO()
err = StringIO()
ts_now = next(self.ts_iter)
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err), \
mock_timestamp_now(ts_now):
ret = main([broker.db_file, 'repair', '--yes',
'--min-shard-age', '0'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
out_lines = out.getvalue().split('\n')
self.assertEqual(
['Repairs necessary to remove overlapping shard ranges.'],
out_lines[:1])
updated_ranges = broker.get_shard_ranges()
for sr in overlap_shard_ranges_1 + overlap_shard_ranges_2:
sr.update_state(ShardRange.SHRINKING, ts_now)
sr.epoch = ts_now
expected = sorted(
shard_ranges + overlap_shard_ranges_1 + overlap_shard_ranges_2,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
def test_repair_parent_overlaps_with_children_donors(self):
# Verify that the overlap repair command ignores expected transient
# overlaps between parent shard acceptor and child donor shards.
root_broker = self._make_broker()
root_broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
self.assertTrue(root_broker.is_root_container())
# The parent shard range would have been set to state SHARDING in the
# shard container but is still showing as ACTIVE in the root container.
# (note: it is valid for a single shard to span the entire namespace)
ts_parent = next(self.ts_iter)
parent_shard = ShardRange(
ShardRange.make_path('.shards_a', 'c', 'c', ts_parent, 0),
ts_parent, lower='', upper='', object_count=10,
state=ShardRange.ACTIVE)
# Children shards have reported themselves to root as CLEAVING/
# CREATED.
ts_child = next(self.ts_iter)
child_shards = [
ShardRange(
ShardRange.make_path(
'.shards_a', 'c', parent_shard.container, ts_child, 0),
ts_child, lower='', upper='p', object_count=1,
state=ShardRange.CLEAVED),
ShardRange(
ShardRange.make_path(
'.shards_a', 'c', parent_shard.container, ts_child, 1),
ts_child, lower='p', upper='', object_count=1,
state=ShardRange.CLEAVED)]
root_broker.merge_shard_ranges([parent_shard] + child_shards)
out = StringIO()
err = StringIO()
ts_now = next(self.ts_iter)
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err), \
mock_timestamp_now(ts_now):
ret = main([root_broker.db_file, 'repair',
'--yes', '--min-shard-age', '0'])
err_lines = err.getvalue().split('\n')
out_lines = out.getvalue().split('\n')
self.assertEqual(0, ret, err_lines + out_lines)
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
self.assertNotIn(
'Repairs necessary to remove overlapping shard ranges.',
out_lines)
self.assertEqual(
['2 donor shards ignored due to parent-child relationship'
' checks'], out_lines[:1])
updated_ranges = root_broker.get_shard_ranges()
# Expect no change to shard ranges.
expected = sorted([parent_shard] + child_shards,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
def test_repair_children_overlaps_with_parent_donor(self):
# Verify that the overlap repair command ignores expected transient
# overlaps between child shard acceptors and parent donor shards.
root_broker = self._make_broker()
root_broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
self.assertTrue(root_broker.is_root_container())
# The parent shard range would have been set to state SHARDING in the
# shard container but is still showing as ACTIVE in the root container.
# (note: it is valid for a single shard to span the entire namespace)
ts_parent = next(self.ts_iter)
parent_shard = ShardRange(
ShardRange.make_path('.shards_a', 'c', 'c', ts_parent, 0),
ts_parent, lower='', upper='', object_count=5,
state=ShardRange.ACTIVE)
# Children shards have reported themselves to root as CLEAVING/CREATED,
# but they will end up becoming acceptor shards because they have more
# objects than the parent shard above.
ts_child = next(self.ts_iter)
child_shards = [
ShardRange(
ShardRange.make_path(
'.shards_a', 'c', parent_shard.container, ts_child, 0),
ts_child, lower='', upper='p', object_count=5,
state=ShardRange.CLEAVED),
ShardRange(
ShardRange.make_path(
'.shards_a', 'c', parent_shard.container, ts_child, 1),
ts_child, lower='p', upper='', object_count=5,
state=ShardRange.CLEAVED)]
root_broker.merge_shard_ranges([parent_shard] + child_shards)
out = StringIO()
err = StringIO()
ts_now = next(self.ts_iter)
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err), \
mock_timestamp_now(ts_now):
ret = main([root_broker.db_file, 'repair',
'--yes', '--min-shard-age', '0'])
err_lines = err.getvalue().split('\n')
out_lines = out.getvalue().split('\n')
self.assertEqual(0, ret, err_lines + out_lines)
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
self.assertNotIn(
'Repairs necessary to remove overlapping shard ranges.',
out_lines)
self.assertEqual(
['1 donor shards ignored due to parent-child relationship'
' checks'], out_lines[:1])
updated_ranges = root_broker.get_shard_ranges()
# Expect no change to shard ranges.
expected = sorted([parent_shard] + child_shards,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
@with_tempdir
def test_show_and_analyze(self, tempdir):
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
with mock_timestamp_now(next(self.ts_iter)): # t1
shard_ranges = make_shard_ranges(
broker, self.shard_data, '.shards_')
with mock_timestamp_now(next(self.ts_iter)):
overlap_shard_ranges_1 = make_shard_ranges(
broker, self.overlap_shard_data_1, '.shards_')
with mock_timestamp_now(next(self.ts_iter)):
overlap_shard_ranges_2 = make_shard_ranges(
broker, self.overlap_shard_data_2, '.shards_')
broker.merge_shard_ranges(shard_ranges + overlap_shard_ranges_1 +
overlap_shard_ranges_2)
self.assertTrue(broker.is_root_container())
# run show command
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'show'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
shard_json = json.loads(out.getvalue())
expected = sorted(
shard_ranges + overlap_shard_ranges_1 + overlap_shard_ranges_2,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(
expected, [ShardRange.from_dict(data) for data in shard_json])
# dump data to a file and then run analyze subcommand
shard_file = os.path.join(tempdir, 'shards.json')
with open(shard_file, 'w') as fd:
json.dump(shard_json, fd)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([shard_file, 'analyze'])
self.assertEqual(0, ret)
self.assertEqual('', err.getvalue())
out_lines = out.getvalue().split('\n')
self.assertEqual(
['Repairs necessary to remove overlapping shard ranges.'],
out_lines[:1])
# no changes made to broker
updated_ranges = broker.get_shard_ranges()
expected = sorted(
shard_ranges + overlap_shard_ranges_1 + overlap_shard_ranges_2,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
# tweak timestamps to make the preferred path include shards from two
# sets, so that shards to remove have name-timestamps that are also in
# shards to keep
t4 = next(self.ts_iter)
for sr in shard_ranges[:5] + overlap_shard_ranges_1[5:]:
sr.timestamp = t4
broker.merge_shard_ranges(shard_ranges + overlap_shard_ranges_1 +
overlap_shard_ranges_2)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([broker.db_file, 'show'])
self.assertEqual(0, ret)
shard_json = json.loads(out.getvalue())
expected = sorted(
shard_ranges + overlap_shard_ranges_1 + overlap_shard_ranges_2,
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(
expected, [ShardRange.from_dict(data) for data in shard_json])
with open(shard_file, 'w') as fd:
json.dump(shard_json, fd)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([shard_file, 'analyze'])
self.assertEqual(0, ret)
self.assertEqual('', err.getvalue())
out_lines = out.getvalue().split('\n')
self.assertEqual(
['Repairs necessary to remove overlapping shard ranges.'],
out_lines[:1])
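# analyzing the same data with the epochs stripped should give the same
# result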
filtered_shard_json = [{k: v for k, v in sr.items() if k != 'epoch'}
for sr in shard_json]
with open(shard_file, 'w') as fd:
json.dump(filtered_shard_json, fd)
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
ret = main([shard_file, 'analyze'])
self.assertEqual(0, ret)
self.assertEqual('', err.getvalue())
new_out_lines = out.getvalue().split('\n')
self.assertEqual(out_lines, new_out_lines)
def test_subcommand_required(self):
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), \
mock.patch('sys.stderr', err):
if six.PY2:
with self.assertRaises(SystemExit) as cm:
main(['db file'])
err_lines = err.getvalue().split('\n')
self.assertIn('too few arguments', ' '.join(err_lines))
self.assertEqual(2, cm.exception.code)
else:
ret = main(['db file'])
self.assertEqual(2, ret)
err_lines = err.getvalue().split('\n')
self.assertIn('A sub-command is required.', err_lines)
def test_dry_run_and_yes_is_invalid(self):
out = StringIO()
err = StringIO()
with mock.patch('sys.stdout', out), \
mock.patch('sys.stderr', err), \
self.assertRaises(SystemExit) as cm:
main(['db file', 'repair', '--dry-run', '--yes'])
self.assertEqual(2, cm.exception.code)
err_lines = err.getvalue().split('\n')
runner = os.path.basename(sys.argv[0])
self.assertIn(
'usage: %s path_to_file repair [-h] [--yes | --dry-run]' % runner,
err_lines[0])
self.assertIn(
"argument --yes/-y: not allowed with argument --dry-run/-n",
err_lines[-2], err_lines)
| swift-master | test/unit/cli/test_manage_shard_ranges.py |
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import json
import mock
import six
import unittest
from swift.cli import container_deleter
from swift.common import internal_client
from swift.common import swob
from swift.common import utils
AppCall = collections.namedtuple('AppCall', [
'method', 'path', 'query', 'headers', 'body'])
class FakeInternalClient(internal_client.InternalClient):
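# Test double that replays a canned sequence of responses and records
# every request made via make_request().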
def __init__(self, responses):
self.resp_iter = iter(responses)
self.calls = []
def make_request(self, method, path, headers, acceptable_statuses,
body_file=None, params=None):
if body_file is None:
body = None
else:
body = body_file.read()
path, _, query = path.partition('?')
self.calls.append(AppCall(method, path, query, headers, body))
resp = next(self.resp_iter)
if isinstance(resp, Exception):
raise resp
return resp
def __enter__(self):
return self
def __exit__(self, *args):
unused_responses = [r for r in self.resp_iter]
if unused_responses:
raise Exception('Unused responses: %r' % unused_responses)
class TestContainerDeleter(unittest.TestCase):
def setUp(self):
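# freeze time to an incrementing counter so each call advances by one
# second, and shrink OBJECTS_PER_UPDATE so tests can exercise multiple
# UPDATE requests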
patcher = mock.patch.object(container_deleter.time, 'time',
side_effect=itertools.count())
patcher.__enter__()
self.addCleanup(patcher.__exit__, None, None, None)
patcher = mock.patch.object(container_deleter, 'OBJECTS_PER_UPDATE', 5)
patcher.__enter__()
self.addCleanup(patcher.__exit__, None, None, None)
def test_make_delete_jobs(self):
ts = '1558463777.42739'
self.assertEqual(
container_deleter.make_delete_jobs(
'acct', 'cont', ['obj1', 'obj2'],
utils.Timestamp(ts)),
[{'name': ts + '-acct/cont/obj1',
'deleted': 0,
'created_at': ts,
'etag': utils.MD5_OF_EMPTY_STRING,
'size': 0,
'storage_policy_index': 0,
'content_type': 'application/async-deleted'},
{'name': ts + '-acct/cont/obj2',
'deleted': 0,
'created_at': ts,
'etag': utils.MD5_OF_EMPTY_STRING,
'size': 0,
'storage_policy_index': 0,
'content_type': 'application/async-deleted'}])
def test_make_delete_jobs_native_utf8(self):
ts = '1558463777.42739'
uacct = acct = u'acct-\U0001f334'
ucont = cont = u'cont-\N{SNOWMAN}'
uobj1 = obj1 = u'obj-\N{GREEK CAPITAL LETTER ALPHA}'
uobj2 = obj2 = u'/obj-\N{GREEK CAPITAL LETTER OMEGA}'
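# on py2 the names are passed in as (native) byte strings, but the queued
# job names should still come out as unicode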
if six.PY2:
acct = acct.encode('utf8')
cont = cont.encode('utf8')
obj1 = obj1.encode('utf8')
obj2 = obj2.encode('utf8')
self.assertEqual(
container_deleter.make_delete_jobs(
acct, cont, [obj1, obj2], utils.Timestamp(ts)),
[{'name': u'%s-%s/%s/%s' % (ts, uacct, ucont, uobj1),
'deleted': 0,
'created_at': ts,
'etag': utils.MD5_OF_EMPTY_STRING,
'size': 0,
'storage_policy_index': 0,
'content_type': 'application/async-deleted'},
{'name': u'%s-%s/%s/%s' % (ts, uacct, ucont, uobj2),
'deleted': 0,
'created_at': ts,
'etag': utils.MD5_OF_EMPTY_STRING,
'size': 0,
'storage_policy_index': 0,
'content_type': 'application/async-deleted'}])
def test_make_delete_jobs_unicode_utf8(self):
ts = '1558463777.42739'
acct = u'acct-\U0001f334'
cont = u'cont-\N{SNOWMAN}'
obj1 = u'obj-\N{GREEK CAPITAL LETTER ALPHA}'
obj2 = u'obj-\N{GREEK CAPITAL LETTER OMEGA}'
self.assertEqual(
container_deleter.make_delete_jobs(
acct, cont, [obj1, obj2], utils.Timestamp(ts)),
[{'name': u'%s-%s/%s/%s' % (ts, acct, cont, obj1),
'deleted': 0,
'created_at': ts,
'etag': utils.MD5_OF_EMPTY_STRING,
'size': 0,
'storage_policy_index': 0,
'content_type': 'application/async-deleted'},
{'name': u'%s-%s/%s/%s' % (ts, acct, cont, obj2),
'deleted': 0,
'created_at': ts,
'etag': utils.MD5_OF_EMPTY_STRING,
'size': 0,
'storage_policy_index': 0,
'content_type': 'application/async-deleted'}])
def test_mark_for_deletion_empty_no_yield(self):
with FakeInternalClient([
swob.Response(json.dumps([
])),
]) as swift:
self.assertEqual(container_deleter.mark_for_deletion(
swift,
'account',
'container',
'marker',
'end',
'prefix',
timestamp=None,
yield_time=None,
), 0)
self.assertEqual(swift.calls, [
('GET', '/v1/account/container',
'format=json&marker=marker&end_marker=end&prefix=prefix',
{}, None),
])
def test_mark_for_deletion_empty_with_yield(self):
with FakeInternalClient([
swob.Response(json.dumps([
])),
]) as swift:
self.assertEqual(list(container_deleter.mark_for_deletion(
swift,
'account',
'container',
'marker',
'end',
'prefix',
timestamp=None,
yield_time=0.5,
)), [(0, None)])
self.assertEqual(swift.calls, [
('GET', '/v1/account/container',
'format=json&marker=marker&end_marker=end&prefix=prefix',
{}, None),
])
def test_mark_for_deletion_one_update_no_yield(self):
ts = '1558463777.42739'
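# responses: one page of listings, an empty page to end the listing, then
# a 202 for the enqueued deletion jobs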
with FakeInternalClient([
swob.Response(json.dumps([
{'name': '/obj1'},
{'name': 'obj2'},
{'name': 'obj3'},
])),
swob.Response(json.dumps([
])),
swob.Response(status=202),
]) as swift:
self.assertEqual(container_deleter.mark_for_deletion(
swift,
'account',
'container',
'',
'',
'',
timestamp=utils.Timestamp(ts),
yield_time=None,
), 3)
self.assertEqual(swift.calls, [
('GET', '/v1/account/container',
'format=json&marker=&end_marker=&prefix=', {}, None),
('GET', '/v1/account/container',
'format=json&marker=obj3&end_marker=&prefix=', {}, None),
('UPDATE', '/v1/.expiring_objects/' + ts.split('.')[0], '', {
'X-Backend-Allow-Private-Methods': 'True',
'X-Backend-Storage-Policy-Index': '0',
'X-Timestamp': ts}, mock.ANY),
])
self.assertEqual(
json.loads(swift.calls[-1].body),
container_deleter.make_delete_jobs(
'account', 'container', ['/obj1', 'obj2', 'obj3'],
utils.Timestamp(ts)
)
)
def test_mark_for_deletion_two_updates_with_yield(self):
ts = '1558463777.42739'
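# responses: six objects (more than one batch of 5), a 202 for the first
# UPDATE, an empty listing page, then a 202 for the second UPDATE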
with FakeInternalClient([
swob.Response(json.dumps([
{'name': 'obj1'},
{'name': 'obj2'},
{'name': 'obj3'},
{'name': u'obj4-\N{SNOWMAN}'},
{'name': 'obj5'},
{'name': 'obj6'},
])),
swob.Response(status=202),
swob.Response(json.dumps([
])),
swob.Response(status=202),
]) as swift:
self.assertEqual(list(container_deleter.mark_for_deletion(
swift,
'account',
'container',
'',
'end',
'pre',
timestamp=utils.Timestamp(ts),
yield_time=0,
)), [(5, 'obj5'), (6, 'obj6'), (6, None)])
self.assertEqual(swift.calls, [
('GET', '/v1/account/container',
'format=json&marker=&end_marker=end&prefix=pre', {}, None),
('UPDATE', '/v1/.expiring_objects/' + ts.split('.')[0], '', {
'X-Backend-Allow-Private-Methods': 'True',
'X-Backend-Storage-Policy-Index': '0',
'X-Timestamp': ts}, mock.ANY),
('GET', '/v1/account/container',
'format=json&marker=obj6&end_marker=end&prefix=pre',
{}, None),
('UPDATE', '/v1/.expiring_objects/' + ts.split('.')[0], '', {
'X-Backend-Allow-Private-Methods': 'True',
'X-Backend-Storage-Policy-Index': '0',
'X-Timestamp': ts}, mock.ANY),
])
self.assertEqual(
json.loads(swift.calls[-3].body),
container_deleter.make_delete_jobs(
'account', 'container',
['obj1', 'obj2', 'obj3', u'obj4-\N{SNOWMAN}', 'obj5'],
utils.Timestamp(ts)
)
)
self.assertEqual(
json.loads(swift.calls[-1].body),
container_deleter.make_delete_jobs(
'account', 'container', ['obj6'],
utils.Timestamp(ts)
)
)
def test_init_internal_client_log_name(self):
with mock.patch(
'swift.cli.container_deleter.InternalClient') \
as mock_ic:
container_deleter.main(['a', 'c', '--request-tries', '2'])
mock_ic.assert_called_once_with(
'/etc/swift/internal-client.conf',
'Swift Container Deleter', 2,
global_conf={'log_name': 'container-deleter-ic'})
| swift-master | test/unit/cli/test_container_deleter.py |
#! /usr/bin/env python
# Copyright (c) 2015 Samuel Merritt <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import mock
from six import StringIO
import unittest
from test.unit import with_tempdir
from swift.cli.ring_builder_analyzer import parse_scenario, run_scenario
class TestRunScenario(unittest.TestCase):
@with_tempdir
def test_it_runs(self, tempdir):
builder_path = os.path.join(tempdir, 'test.builder')
scenario = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0,
'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100],
['add', 'z2-3.4.5.6:7/sda9', 200],
['add', 'z2-3.4.5.6:7/sda10', 200],
['add', 'z2-3.4.5.6:7/sda11', 200]],
[['set_weight', 0, 150]],
[['remove', 1]],
[['save', builder_path]]]}
parsed = parse_scenario(json.dumps(scenario))
fake_stdout = StringIO()
with mock.patch('sys.stdout', fake_stdout):
run_scenario(parsed)
# Just test that it produced some output as it ran; the fact that
# this doesn't crash and produces output that resembles something
# useful is good enough.
self.assertIn('Rebalance', fake_stdout.getvalue())
self.assertTrue(os.path.exists(builder_path))
class TestParseScenario(unittest.TestCase):
def test_good(self):
scenario = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0,
'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100],
['add', 'z2-3.4.5.6:7/sda9', 200]],
[['set_weight', 0, 150]],
[['remove', 1]]]}
parsed = parse_scenario(json.dumps(scenario))
self.assertEqual(parsed['replicas'], 3)
self.assertEqual(parsed['part_power'], 8)
self.assertEqual(parsed['random_seed'], 123)
self.assertEqual(parsed['overload'], 0)
self.assertEqual(parsed['rounds'], [
[['add', {'device': 'sda8',
'ip': '3.4.5.6',
'meta': '',
'port': 7,
'region': 1,
'replication_ip': '3.4.5.6',
'replication_port': 7,
'weight': 100.0,
'zone': 2}],
['add', {'device': u'sda9',
'ip': u'3.4.5.6',
'meta': '',
'port': 7,
'region': 1,
'replication_ip': '3.4.5.6',
'replication_port': 7,
'weight': 200.0,
'zone': 2}]],
[['set_weight', 0, 150.0]],
[['remove', 1]]])
# The rest of this test class is just a catalog of the myriad ways that
# the input can be malformed.
def test_invalid_json(self):
self.assertRaises(ValueError, parse_scenario, "{")
def test_json_not_object(self):
self.assertRaises(ValueError, parse_scenario, "[]")
self.assertRaises(ValueError, parse_scenario, "\"stuff\"")
def test_bad_replicas(self):
working_scenario = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0,
'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100]]]}
busted = dict(working_scenario)
del busted['replicas']
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, replicas='blahblah')
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, replicas=-1)
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_part_power(self):
working_scenario = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0,
'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100]]]}
busted = dict(working_scenario)
del busted['part_power']
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, part_power='blahblah')
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, part_power=0)
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, part_power=33)
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_random_seed(self):
working_scenario = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0,
'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100]]]}
busted = dict(working_scenario)
del busted['random_seed']
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, random_seed='blahblah')
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_overload(self):
working_scenario = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0,
'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100]]]}
busted = dict(working_scenario)
del busted['overload']
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, overload='blahblah')
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, overload=-0.01)
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_rounds(self):
base = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0}
self.assertRaises(ValueError, parse_scenario, json.dumps(base))
busted = dict(base, rounds={})
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(base, rounds=[{}])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(base, rounds=[[['bork']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_add(self):
base = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0}
# no dev
busted = dict(base, rounds=[[['add']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# no weight
busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6200/d7']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# too many fields
busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6200/d7', 1, 2]]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# can't parse
busted = dict(base, rounds=[[['add', 'not a good value', 100]]])
# N.B. the ValueErrors coming out of ring.utils.parse_add_value
# are already pretty good
expected = "Invalid device specifier (round 0, command 0): " \
"Invalid add value: not a good value"
with self.assertRaises(ValueError) as cm:
parse_scenario(json.dumps(busted))
self.assertEqual(str(cm.exception), expected)
# negative weight
busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6200/d7', -1]]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_remove(self):
base = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0}
# no dev
busted = dict(base, rounds=[[['remove']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# bad dev id
busted = dict(base, rounds=[[['remove', 'not an int']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# too many fields
busted = dict(base, rounds=[[['remove', 1, 2]]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_set_weight(self):
base = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0}
# no dev
busted = dict(base, rounds=[[['set_weight']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# no weight
busted = dict(base, rounds=[[['set_weight', 0]]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# bad dev id
busted = dict(base, rounds=[[['set_weight', 'not an int', 90]]])
expected = "Invalid device ID in set_weight (round 0, command 0): " \
"invalid literal for int() with base 10: 'not an int'"
with self.assertRaises(ValueError) as cm:
parse_scenario(json.dumps(busted))
self.assertEqual(str(cm.exception), expected)
# negative weight
busted = dict(base, rounds=[[['set_weight', 1, -1]]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# bogus weight
busted = dict(base, rounds=[[['set_weight', 1, 'bogus']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_save(self):
base = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0}
# no builder name
busted = dict(base, rounds=[[['save']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
| swift-master | test/unit/cli/test_ring_builder_analyzer.py |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import fcntl
import json
from contextlib import contextmanager
import logging
from textwrap import dedent
import mock
import os
import pickle
import shutil
import tempfile
import time
import unittest
import uuid
from six.moves import cStringIO as StringIO
from swift.cli import relinker
from swift.common import ring, utils
from swift.common import storage_policy
from swift.common.exceptions import PathNotDir
from swift.common.storage_policy import (
StoragePolicy, StoragePolicyCollection, POLICIES, ECStoragePolicy,
get_policy_string)
from swift.obj.diskfile import write_metadata, DiskFileRouter, \
DiskFileManager, relink_paths, BaseDiskFileManager
from test.debug_logger import debug_logger
from test.unit import skip_if_no_xattrs, DEFAULT_TEST_EC_TYPE, \
patch_policies
PART_POWER = 8
class TestRelinker(unittest.TestCase):
maxDiff = None
def setUp(self):
skip_if_no_xattrs()
self.logger = debug_logger()
self.testdir = tempfile.mkdtemp()
self.devices = os.path.join(self.testdir, 'node')
self.recon_cache_path = os.path.join(self.testdir, 'cache')
self.recon_cache = os.path.join(self.recon_cache_path,
'relinker.recon')
shutil.rmtree(self.testdir, ignore_errors=True)
os.mkdir(self.testdir)
os.mkdir(self.devices)
os.mkdir(self.recon_cache_path)
self.rb = ring.RingBuilder(PART_POWER, 6.0, 1)
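# build a 6-replica ring with one device on each of six fake IPs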
for i in range(6):
ip = "127.0.0.%s" % i
self.rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
'ip': ip, 'port': 10000, 'device': 'sda1'})
self.rb.rebalance(seed=1)
self.conf_file = os.path.join(self.testdir, 'relinker.conf')
self._setup_config()
self.existing_device = 'sda1'
os.mkdir(os.path.join(self.devices, self.existing_device))
self.objects = os.path.join(self.devices, self.existing_device,
'objects')
self.policy = StoragePolicy(0, 'platinum', True)
storage_policy._POLICIES = StoragePolicyCollection([self.policy])
self._setup_object(policy=self.policy)
patcher = mock.patch('swift.cli.relinker.hubs')
self.mock_hubs = patcher.start()
self.addCleanup(patcher.stop)
def _setup_config(self):
config = """
[DEFAULT]
swift_dir = {swift_dir}
devices = {devices}
mount_check = {mount_check}
[object-relinker]
recon_cache_path = {recon_cache_path}
# update every chance we get!
stats_interval = 0
""".format(
swift_dir=self.testdir,
devices=self.devices,
mount_check=False,
recon_cache_path=self.recon_cache_path,
)
with open(self.conf_file, 'w') as f:
f.write(dedent(config))
def _get_object_name(self, condition=None):
attempts = []
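# try random object names until one maps to different partitions before
# and after the partition power increase (and satisfies any extra
# condition)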
for _ in range(50):
account = 'a'
container = 'c'
obj = 'o-' + str(uuid.uuid4())
_hash = utils.hash_path(account, container, obj)
part = utils.get_partition_for_hash(_hash, PART_POWER)
next_part = utils.get_partition_for_hash(_hash, PART_POWER + 1)
obj_path = os.path.join(os.path.sep, account, container, obj)
# There's 1/512 chance that both old and new parts will be 0;
# that's not a terribly interesting case, as there's nothing to do
attempts.append((part, next_part, 2**PART_POWER))
if (part != next_part and
(condition(part) if condition else True)):
break
else:
self.fail('Failed to setup object satisfying test preconditions %s'
% attempts)
return _hash, part, next_part, obj_path
def _create_object(self, policy, part, _hash, ext='.data'):
objects_dir = os.path.join(self.devices, self.existing_device,
get_policy_string('objects', policy))
shutil.rmtree(objects_dir, ignore_errors=True)
os.mkdir(objects_dir)
objdir = os.path.join(objects_dir, str(part), _hash[-3:], _hash)
os.makedirs(objdir)
timestamp = utils.Timestamp.now()
filename = timestamp.internal + ext
objname = os.path.join(objdir, filename)
with open(objname, "wb") as dummy:
dummy.write(b"Hello World!")
write_metadata(dummy,
{'name': self.obj_path, 'Content-Length': '12'})
return objdir, filename, timestamp
def _setup_object(self, condition=None, policy=None, ext='.data'):
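# create an object file under the current partition and record the paths
# expected after the partition power increase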
policy = policy or self.policy
_hash, part, next_part, obj_path = self._get_object_name(condition)
self._hash = _hash
self.part = part
self.next_part = next_part
self.obj_path = obj_path
objects_dir = os.path.join(self.devices, self.existing_device,
get_policy_string('objects', policy))
self.objdir, self.object_fname, self.obj_ts = self._create_object(
policy, part, _hash, ext)
self.objname = os.path.join(self.objdir, self.object_fname)
self.part_dir = os.path.join(objects_dir, str(self.part))
self.suffix = self._hash[-3:]
self.suffix_dir = os.path.join(self.part_dir, self.suffix)
self.next_part_dir = os.path.join(objects_dir, str(self.next_part))
self.next_suffix_dir = os.path.join(self.next_part_dir, self.suffix)
self.expected_dir = os.path.join(self.next_suffix_dir, self._hash)
self.expected_file = os.path.join(self.expected_dir, self.object_fname)
def _make_link(self, filename, part_power):
# make a file in the older part_power location and link it to a file in
# the next part power location
new_filepath = os.path.join(self.expected_dir, filename)
older_filepath = utils.replace_partition_in_path(
self.devices, new_filepath, part_power)
os.makedirs(os.path.dirname(older_filepath))
with open(older_filepath, 'w') as fd:
fd.write(older_filepath)
os.makedirs(self.expected_dir)
os.link(older_filepath, new_filepath)
with open(new_filepath, 'r') as fd:
self.assertEqual(older_filepath, fd.read()) # sanity check
return older_filepath, new_filepath
def _save_ring(self, policies=POLICIES):
self.rb._ring = None
rd = self.rb.get_ring()
for policy in policies:
rd.save(os.path.join(
self.testdir, '%s.ring.gz' % policy.ring_name))
# Enforce ring reloading in relinker
policy.object_ring = None
def tearDown(self):
shutil.rmtree(self.testdir, ignore_errors=True)
storage_policy.reload_storage_policies()
@contextmanager
def _mock_listdir(self):
orig_listdir = utils.listdir
def mocked(path):
if path == self.objects:
raise OSError
return orig_listdir(path)
with mock.patch('swift.common.utils.listdir', mocked):
yield
@contextmanager
def _mock_relinker(self):
with mock.patch.object(relinker.logging, 'getLogger',
return_value=self.logger), \
mock.patch.object(relinker, 'get_logger',
return_value=self.logger), \
mock.patch('swift.cli.relinker.DEFAULT_RECON_CACHE_PATH',
self.recon_cache_path):
yield
def test_workers_parent(self):
os.mkdir(os.path.join(self.devices, 'sda2'))
self.rb.prepare_increase_partition_power()
self.rb.increase_partition_power()
self._save_ring()
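# simulate two forked worker processes that both exit with status 0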
pids = {
2: 0,
3: 0,
}
def mock_wait():
return pids.popitem()
with mock.patch('os.fork', side_effect=list(pids.keys())), \
mock.patch('os.wait', mock_wait):
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--workers', '2',
'--skip-mount',
]))
self.assertEqual(pids, {})
def test_workers_parent_bubbles_up_errors(self):
def do_test(wait_result, msg):
pids = {
2: 0,
3: 0,
4: 0,
5: wait_result,
6: 0,
}
with mock.patch('os.fork', side_effect=list(pids.keys())), \
mock.patch('os.wait', lambda: pids.popitem()), \
self._mock_relinker():
self.assertEqual(1, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
self.assertEqual(pids, {})
self.assertEqual([], self.logger.get_lines_for_level('error'))
warning_lines = self.logger.get_lines_for_level('warning')
self.assertTrue(
warning_lines[0].startswith('Worker (pid=5, devs='))
self.assertTrue(
warning_lines[0].endswith(msg),
'Expected log line to end with %r; got %r'
% (msg, warning_lines[0]))
self.assertFalse(warning_lines[1:])
info_lines = self.logger.get_lines_for_level('info')
self.assertEqual(2, len(info_lines))
self.assertIn('Starting relinker (cleanup=True) using 5 workers:',
info_lines[0])
self.assertIn('Finished relinker (cleanup=True):',
info_lines[1])
print(info_lines)
self.logger.clear()
os.mkdir(os.path.join(self.devices, 'sda2'))
os.mkdir(os.path.join(self.devices, 'sda3'))
os.mkdir(os.path.join(self.devices, 'sda4'))
os.mkdir(os.path.join(self.devices, 'sda5'))
self.rb.prepare_increase_partition_power()
self.rb.increase_partition_power()
self._save_ring()
# signals get the low bits
do_test(9, 'exited in 0.0s after receiving signal: 9')
# exit codes get the high
do_test(1 << 8, 'completed in 0.0s with errors')
do_test(42 << 8, 'exited in 0.0s with unexpected status 42')
def test_workers_children(self):
os.mkdir(os.path.join(self.devices, 'sda2'))
os.mkdir(os.path.join(self.devices, 'sda3'))
os.mkdir(os.path.join(self.devices, 'sda4'))
os.mkdir(os.path.join(self.devices, 'sda5'))
self.rb.prepare_increase_partition_power()
self.rb.increase_partition_power()
self._save_ring()
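# with 5 devices and 2 workers the devices should be shared round-robin
# between the worker children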
calls = []
def fake_fork():
calls.append('fork')
return 0
def fake_run(self):
calls.append(('run', self.device_list))
return 0
def fake_exit(status):
calls.append(('exit', status))
with mock.patch('os.fork', fake_fork), \
mock.patch('os._exit', fake_exit), \
mock.patch('swift.cli.relinker.Relinker.run', fake_run):
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--workers', '2',
'--skip-mount',
]))
self.assertEqual([
'fork',
('run', ['sda1', 'sda3', 'sda5']),
('exit', 0),
'fork',
('run', ['sda2', 'sda4']),
('exit', 0),
], calls)
# test too many workers
calls = []
with mock.patch('os.fork', fake_fork), \
mock.patch('os._exit', fake_exit), \
mock.patch('swift.cli.relinker.Relinker.run', fake_run):
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--workers', '6',
'--skip-mount',
]))
self.assertEqual([
'fork',
('run', ['sda1']),
('exit', 0),
'fork',
('run', ['sda2']),
('exit', 0),
'fork',
('run', ['sda3']),
('exit', 0),
'fork',
('run', ['sda4']),
('exit', 0),
'fork',
('run', ['sda5']),
('exit', 0),
], calls)
def _do_test_relinker_drop_privileges(self, command):
@contextmanager
def do_mocks():
# attach mocks to call_capture so that call order can be asserted
call_capture = mock.Mock()
mod = 'swift.cli.relinker.'
with mock.patch(mod + 'Relinker') as mock_relinker, \
mock.patch(mod + 'drop_privileges') as mock_dp, \
mock.patch(mod + 'os.listdir',
return_value=['sda', 'sdb']):
mock_relinker.return_value.run.return_value = 0
call_capture.attach_mock(mock_dp, 'drop_privileges')
call_capture.attach_mock(mock_relinker, 'run')
yield call_capture
# no user option
with do_mocks() as capture:
self.assertEqual(0, relinker.main([command, '--workers', '0']))
self.assertEqual([mock.call.run(mock.ANY, mock.ANY, ['sda', 'sdb'],
do_cleanup=(command == 'cleanup'))],
capture.method_calls)
# cli option --user
with do_mocks() as capture:
self.assertEqual(0, relinker.main([command, '--user', 'cli_user',
'--workers', '0']))
self.assertEqual([('drop_privileges', ('cli_user',), {}),
mock.call.run(mock.ANY, mock.ANY, ['sda', 'sdb'],
do_cleanup=(command == 'cleanup'))],
capture.method_calls)
# cli option --user takes precedence over conf file user
with do_mocks() as capture:
with mock.patch('swift.cli.relinker.readconf',
return_value={'user': 'conf_user'}):
self.assertEqual(0, relinker.main([command, 'conf_file',
'--user', 'cli_user',
'--workers', '0']))
self.assertEqual([('drop_privileges', ('cli_user',), {}),
mock.call.run(mock.ANY, mock.ANY, ['sda', 'sdb'],
do_cleanup=(command == 'cleanup'))],
capture.method_calls)
# conf file user
with do_mocks() as capture:
with mock.patch('swift.cli.relinker.readconf',
return_value={'user': 'conf_user',
'workers': '0'}):
self.assertEqual(0, relinker.main([command, 'conf_file']))
self.assertEqual([('drop_privileges', ('conf_user',), {}),
mock.call.run(mock.ANY, mock.ANY, ['sda', 'sdb'],
do_cleanup=(command == 'cleanup'))],
capture.method_calls)
def test_relinker_drop_privileges(self):
self._do_test_relinker_drop_privileges('relink')
self._do_test_relinker_drop_privileges('cleanup')
def _do_test_relinker_files_per_second(self, command):
# no files per second
with mock.patch('swift.cli.relinker.RateLimitedIterator') as it:
self.assertEqual(0, relinker.main([
command,
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
it.assert_not_called()
# zero files per second
with mock.patch('swift.cli.relinker.RateLimitedIterator') as it:
self.assertEqual(0, relinker.main([
command,
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
'--files-per-second', '0'
]))
it.assert_not_called()
# positive files per second
locations = iter([])
with mock.patch('swift.cli.relinker.audit_location_generator',
return_value=locations):
with mock.patch('swift.cli.relinker.RateLimitedIterator') as it:
self.assertEqual(0, relinker.main([
command,
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
'--files-per-second', '1.23'
]))
it.assert_called_once_with(locations, 1.23)
# negative files per second
err = StringIO()
with mock.patch('sys.stderr', err):
with self.assertRaises(SystemExit) as cm:
relinker.main([
command,
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
'--files-per-second', '-1'
])
self.assertEqual(2, cm.exception.code) # NB exit code 2 from argparse
self.assertIn('--files-per-second: invalid non_negative_float value',
err.getvalue())
def test_relink_files_per_second(self):
self.rb.prepare_increase_partition_power()
self._save_ring()
self._do_test_relinker_files_per_second('relink')
def test_cleanup_files_per_second(self):
self._common_test_cleanup()
self._do_test_relinker_files_per_second('cleanup')
@patch_policies(
[StoragePolicy(0, name='gold', is_default=True),
ECStoragePolicy(1, name='platinum', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2)],
fake_ring_args=[{}, {}])
def test_conf_file(self):
config = """
[DEFAULT]
swift_dir = %s
devices = /test/node
mount_check = false
reclaim_age = 5184000
[object-relinker]
log_level = WARNING
log_name = test-relinker
""" % self.testdir
conf_file = os.path.join(self.testdir, 'relinker.conf')
with open(conf_file, 'w') as f:
f.write(dedent(config))
# cite conf file on command line
with mock.patch('swift.cli.relinker.Relinker') as mock_relinker:
relinker.main(['relink', conf_file, '--device', 'sdx', '--debug'])
exp_conf = {
'__file__': mock.ANY,
'swift_dir': self.testdir,
'devices': '/test/node',
'mount_check': False,
'reclaim_age': '5184000',
'files_per_second': 0.0,
'log_name': 'test-relinker',
'log_level': 'DEBUG',
'policies': POLICIES,
'workers': 'auto',
'partitions': set(),
'recon_cache_path': '/var/cache/swift',
'stats_interval': 300.0,
}
mock_relinker.assert_called_once_with(
exp_conf, mock.ANY, ['sdx'], do_cleanup=False)
logger = mock_relinker.call_args[0][1]
# --debug overrides conf file
self.assertEqual(logging.DEBUG, logger.getEffectiveLevel())
self.assertEqual('test-relinker', logger.logger.name)
# check the conf is passed to DiskFileRouter
self._save_ring()
with mock.patch('swift.cli.relinker.diskfile.DiskFileRouter',
side_effect=DiskFileRouter) as mock_dfr:
relinker.main(['relink', conf_file, '--device', 'sdx', '--debug'])
mock_dfr.assert_called_once_with(exp_conf, mock.ANY)
# flip mount_check, no --debug...
config = """
[DEFAULT]
swift_dir = test/swift/dir
devices = /test/node
mount_check = true
[object-relinker]
log_level = WARNING
log_name = test-relinker
files_per_second = 11.1
recon_cache_path = /var/cache/swift-foo
stats_interval = 111
"""
with open(conf_file, 'w') as f:
f.write(dedent(config))
with mock.patch('swift.cli.relinker.Relinker') as mock_relinker:
relinker.main(['relink', conf_file, '--device', 'sdx'])
mock_relinker.assert_called_once_with({
'__file__': mock.ANY,
'swift_dir': 'test/swift/dir',
'devices': '/test/node',
'mount_check': True,
'files_per_second': 11.1,
'log_name': 'test-relinker',
'log_level': 'WARNING',
'policies': POLICIES,
'partitions': set(),
'workers': 'auto',
'recon_cache_path': '/var/cache/swift-foo',
'stats_interval': 111.0,
}, mock.ANY, ['sdx'], do_cleanup=False)
logger = mock_relinker.call_args[0][1]
self.assertEqual(logging.WARNING, logger.getEffectiveLevel())
self.assertEqual('test-relinker', logger.logger.name)
# override with cli options...
logger = debug_logger()
with mock.patch('swift.cli.relinker.Relinker') as mock_relinker:
with mock.patch('swift.cli.relinker.get_logger',
return_value=logger):
relinker.main([
'relink', conf_file, '--device', 'sdx', '--debug',
'--swift-dir', 'cli-dir', '--devices', 'cli-devs',
'--skip-mount-check', '--files-per-second', '2.2',
'--policy', '1', '--partition', '123',
'--partition', '123', '--partition', '456',
'--workers', '2',
'--stats-interval', '222',
])
mock_relinker.assert_called_once_with({
'__file__': mock.ANY,
'swift_dir': 'cli-dir',
'devices': 'cli-devs',
'mount_check': False,
'files_per_second': 2.2,
'log_level': 'DEBUG',
'log_name': 'test-relinker',
'policies': {POLICIES[1]},
'partitions': {123, 456},
'workers': 2,
'recon_cache_path': '/var/cache/swift-foo',
'stats_interval': 222.0,
}, mock.ANY, ['sdx'], do_cleanup=False)
with mock.patch('swift.cli.relinker.Relinker') as mock_relinker, \
mock.patch('logging.basicConfig') as mock_logging_config:
relinker.main(['relink', '--device', 'sdx',
'--swift-dir', 'cli-dir', '--devices', 'cli-devs',
'--skip-mount-check'])
mock_relinker.assert_called_once_with({
'swift_dir': 'cli-dir',
'devices': 'cli-devs',
'mount_check': False,
'files_per_second': 0.0,
'log_level': 'INFO',
'policies': POLICIES,
'partitions': set(),
'workers': 'auto',
'recon_cache_path': '/var/cache/swift',
'stats_interval': 300.0,
}, mock.ANY, ['sdx'], do_cleanup=False)
mock_logging_config.assert_called_once_with(
format='%(message)s', level=logging.INFO, filename=None)
with mock.patch('swift.cli.relinker.Relinker') as mock_relinker, \
mock.patch('logging.basicConfig') as mock_logging_config:
relinker.main([
'relink', '--debug',
'--swift-dir', 'cli-dir',
'--devices', 'cli-devs',
'--device', 'sdx',
'--skip-mount-check',
'--policy', '0',
'--policy', '1',
'--policy', '0',
])
mock_relinker.assert_called_once_with({
'swift_dir': 'cli-dir',
'devices': 'cli-devs',
'mount_check': False,
'files_per_second': 0.0,
'log_level': 'DEBUG',
'policies': set(POLICIES),
'partitions': set(),
'workers': 'auto',
'recon_cache_path': '/var/cache/swift',
'stats_interval': 300.0,
}, mock.ANY, ['sdx'], do_cleanup=False)
# --debug is now effective
mock_logging_config.assert_called_once_with(
format='%(message)s', level=logging.DEBUG, filename=None)
# now test overriding workers back to auto
config = """
[DEFAULT]
swift_dir = test/swift/dir
devices = /test/node
mount_check = true
[object-relinker]
log_level = WARNING
log_name = test-relinker
files_per_second = 11.1
workers = 8
"""
with open(conf_file, 'w') as f:
f.write(dedent(config))
devices = ['sdx%d' % i for i in range(8, 1)]
cli_cmd = ['relink', conf_file, '--device', 'sdx', '--workers', 'auto']
for device in devices:
cli_cmd.extend(['--device', device])
with mock.patch('swift.cli.relinker.Relinker') as mock_relinker:
relinker.main(cli_cmd)
mock_relinker.assert_called_once_with({
'__file__': mock.ANY,
'swift_dir': 'test/swift/dir',
'devices': '/test/node',
'mount_check': True,
'files_per_second': 11.1,
'log_name': 'test-relinker',
'log_level': 'WARNING',
'policies': POLICIES,
'partitions': set(),
'workers': 'auto',
'recon_cache_path': '/var/cache/swift',
'stats_interval': 300.0,
}, mock.ANY, ['sdx'], do_cleanup=False)
logger = mock_relinker.call_args[0][1]
self.assertEqual(logging.WARNING, logger.getEffectiveLevel())
self.assertEqual('test-relinker', logger.logger.name)
# and now globally
config = """
[DEFAULT]
swift_dir = test/swift/dir
devices = /test/node
mount_check = true
workers = 8
[object-relinker]
log_level = WARNING
log_name = test-relinker
files_per_second = 11.1
"""
with open(conf_file, 'w') as f:
f.write(dedent(config))
with mock.patch('swift.cli.relinker.Relinker') as mock_relinker:
relinker.main(cli_cmd)
mock_relinker.assert_called_once_with({
'__file__': mock.ANY,
'swift_dir': 'test/swift/dir',
'devices': '/test/node',
'mount_check': True,
'files_per_second': 11.1,
'log_name': 'test-relinker',
'log_level': 'WARNING',
'policies': POLICIES,
'partitions': set(),
'workers': 'auto',
'recon_cache_path': '/var/cache/swift',
'stats_interval': 300.0,
}, mock.ANY, ['sdx'], do_cleanup=False)
logger = mock_relinker.call_args[0][1]
self.assertEqual(logging.WARNING, logger.getEffectiveLevel())
self.assertEqual('test-relinker', logger.logger.name)
def test_relinker_utils_get_hub(self):
cli_cmd = ['relink', '--device', 'sdx', '--workers', 'auto',
'--device', '/some/device']
with mock.patch('swift.cli.relinker.Relinker'):
relinker.main(cli_cmd)
self.mock_hubs.use_hub.assert_called_with(utils.get_hub())
def test_relink_first_quartile_no_rehash(self):
# we need object name in lower half of current part
self._setup_object(lambda part: part < 2 ** (PART_POWER - 1))
self.assertLess(self.next_part, 2 ** PART_POWER)
self.rb.prepare_increase_partition_power()
self._save_ring()
with mock.patch('swift.obj.diskfile.DiskFileManager._hash_suffix',
return_value='foo') as mock_hash_suffix:
self.assertEqual(0, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
# ... and no rehash
self.assertEqual([], mock_hash_suffix.call_args_list)
self.assertTrue(os.path.isdir(self.expected_dir))
self.assertTrue(os.path.isfile(self.expected_file))
stat_old = os.stat(os.path.join(self.objdir, self.object_fname))
stat_new = os.stat(self.expected_file)
self.assertEqual(stat_old.st_ino, stat_new.st_ino)
# Invalidated now, rehashed during cleanup
with open(os.path.join(self.next_part_dir, 'hashes.invalid')) as fp:
self.assertEqual(fp.read(), self._hash[-3:] + '\n')
self.assertFalse(os.path.exists(
os.path.join(self.next_part_dir, 'hashes.pkl')))
def test_relink_second_quartile_does_rehash(self):
# we need a part in upper half of current part power
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
self.assertGreaterEqual(self.next_part, 2 ** PART_POWER)
self.assertTrue(self.rb.prepare_increase_partition_power())
self._save_ring()
with mock.patch('swift.obj.diskfile.DiskFileManager._hash_suffix',
return_value='foo') as mock_hash_suffix:
self.assertEqual(0, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
# we rehash the new suffix dirs as we go
self.assertEqual([mock.call(self.next_suffix_dir, policy=self.policy)],
mock_hash_suffix.call_args_list)
# Invalidated and rehashed during relinking
with open(os.path.join(self.next_part_dir, 'hashes.invalid')) as fp:
self.assertEqual(fp.read(), '')
with open(os.path.join(self.next_part_dir, 'hashes.pkl'), 'rb') as fp:
hashes = pickle.load(fp)
self.assertIn(self._hash[-3:], hashes)
self.assertEqual('foo', hashes[self._hash[-3:]])
self.assertFalse(os.path.exists(
os.path.join(self.part_dir, 'hashes.invalid')))
# Check that only the dirty partition in upper half of next part power
# has been created and rehashed
other_next_part = self.next_part ^ 1
other_next_part_dir = os.path.join(self.objects, str(other_next_part))
self.assertFalse(os.path.exists(other_next_part_dir))
def _do_link_test(self, command, old_file_specs, new_file_specs,
conflict_file_specs, exp_old_specs, exp_new_specs,
exp_ret_code=0, relink_errors=None,
mock_relink_paths=None, extra_options=None):
# Each 'spec' is a tuple (file extension, timestamp offset); files are
# created for each old_file_specs and links are created for each in
# new_file_specs, then the given command is run and checks are made that
# exp_old_specs and exp_new_specs exist.
# - conflict_file_specs are files in the new partition that are *not*
# linked to the same file in the old partition
# - relink_errors is a dict ext->exception; the exception will be
# raised each time relink_paths is called with a target_path ending
# with 'ext'
self.assertFalse(relink_errors and mock_relink_paths) # sanity check
new_file_specs = [] if new_file_specs is None else new_file_specs
conflict_file_specs = ([] if conflict_file_specs is None
else conflict_file_specs)
exp_old_specs = [] if exp_old_specs is None else exp_old_specs
relink_errors = {} if relink_errors is None else relink_errors
extra_options = extra_options if extra_options else []
# remove the file created by setUp - we'll create it again if wanted
os.unlink(self.objname)
def make_filenames(specs):
filenames = []
for ext, ts_delta in specs:
ts = utils.Timestamp(float(self.obj_ts) + ts_delta)
filename = '.'.join([ts.internal, ext])
filenames.append(filename)
return filenames
old_filenames = make_filenames(old_file_specs)
new_filenames = make_filenames(new_file_specs)
conflict_filenames = make_filenames(conflict_file_specs)
if new_filenames or conflict_filenames:
os.makedirs(self.expected_dir)
for filename in old_filenames:
filepath = os.path.join(self.objdir, filename)
with open(filepath, 'w') as fd:
fd.write(filepath)
for filename in new_filenames:
new_filepath = os.path.join(self.expected_dir, filename)
if filename in old_filenames:
filepath = os.path.join(self.objdir, filename)
os.link(filepath, new_filepath)
else:
with open(new_filepath, 'w') as fd:
fd.write(new_filepath)
for filename in conflict_filenames:
new_filepath = os.path.join(self.expected_dir, filename)
with open(new_filepath, 'w') as fd:
fd.write(new_filepath)
orig_relink_paths = relink_paths
def default_mock_relink_paths(target_path, new_target_path, **kwargs):
for ext, error in relink_errors.items():
if target_path.endswith(ext):
raise error
return orig_relink_paths(target_path, new_target_path,
**kwargs)
with mock.patch('swift.cli.relinker.diskfile.relink_paths',
mock_relink_paths if mock_relink_paths
else default_mock_relink_paths):
with self._mock_relinker():
self.assertEqual(exp_ret_code, relinker.main([
command,
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
] + extra_options), [self.logger.all_log_lines()])
if exp_new_specs:
self.assertTrue(os.path.isdir(self.expected_dir))
exp_filenames = make_filenames(exp_new_specs)
actual_new = sorted(os.listdir(self.expected_dir))
self.assertEqual(sorted(exp_filenames), sorted(actual_new))
else:
self.assertFalse(os.path.exists(self.expected_dir))
if exp_old_specs:
exp_filenames = make_filenames(exp_old_specs)
actual_old = sorted(os.listdir(self.objdir))
self.assertEqual(sorted(exp_filenames), sorted(actual_old))
else:
self.assertFalse(os.path.exists(self.objdir))
self.assertEqual([], self.logger.get_lines_for_level('error'))
def _relink_test(self, old_file_specs, new_file_specs,
exp_old_specs, exp_new_specs):
# force the rehash to not happen during relink so that we can inspect
# files in the new partition hash dir before they are cleaned up
self._setup_object(lambda part: part < 2 ** (PART_POWER - 1))
self.rb.prepare_increase_partition_power()
self._save_ring()
self._do_link_test('relink', old_file_specs, new_file_specs, None,
exp_old_specs, exp_new_specs)
def test_relink_data_file(self):
self._relink_test((('data', 0),),
None,
(('data', 0),),
(('data', 0),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(1 files, 1 linked, 0 removed, 0 errors)', info_lines)
def test_relink_data_meta_files(self):
self._relink_test((('data', 0), ('meta', 1)),
None,
(('data', 0), ('meta', 1)),
(('data', 0), ('meta', 1)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(2 files, 2 linked, 0 removed, 0 errors)', info_lines)
def test_relink_meta_file(self):
self._relink_test((('meta', 0),),
None,
(('meta', 0),),
(('meta', 0),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(1 files, 1 linked, 0 removed, 0 errors)', info_lines)
def test_relink_ts_file(self):
self._relink_test((('ts', 0),),
None,
(('ts', 0),),
(('ts', 0),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(1 files, 1 linked, 0 removed, 0 errors)', info_lines)
def test_relink_data_meta_ts_files(self):
self._relink_test((('data', 0), ('meta', 1), ('ts', 2)),
None,
(('ts', 2),),
(('ts', 2),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(1 files, 1 linked, 0 removed, 0 errors)', info_lines)
def test_relink_data_ts_meta_files(self):
self._relink_test((('data', 0), ('ts', 1), ('meta', 2)),
None,
(('ts', 1), ('meta', 2)),
(('ts', 1), ('meta', 2)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(2 files, 2 linked, 0 removed, 0 errors)', info_lines)
def test_relink_ts_data_meta_files(self):
self._relink_test((('ts', 0), ('data', 1), ('meta', 2)),
None,
(('data', 1), ('meta', 2)),
(('data', 1), ('meta', 2)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(2 files, 2 linked, 0 removed, 0 errors)', info_lines)
def test_relink_data_data_meta_files(self):
self._relink_test((('data', 0), ('data', 1), ('meta', 2)),
None,
(('data', 1), ('meta', 2)),
(('data', 1), ('meta', 2)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(2 files, 2 linked, 0 removed, 0 errors)', info_lines)
def test_relink_data_existing_meta_files(self):
self._relink_test((('data', 0), ('meta', 1)),
(('meta', 1),),
(('data', 0), ('meta', 1)),
(('data', 0), ('meta', 1)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(2 files, 1 linked, 0 removed, 0 errors)', info_lines)
def test_relink_data_meta_existing_newer_data_files(self):
self._relink_test((('data', 0), ('meta', 2)),
(('data', 1),),
(('data', 0), ('meta', 2)),
(('data', 1), ('meta', 2)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(1 files, 1 linked, 0 removed, 0 errors)', info_lines)
def test_relink_data_existing_older_data_files_no_cleanup(self):
self._relink_test((('data', 1),),
(('data', 0),),
(('data', 1),),
(('data', 0), ('data', 1)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(1 files, 1 linked, 0 removed, 0 errors)', info_lines)
def test_relink_data_existing_older_meta_files(self):
self._relink_test((('data', 0), ('meta', 2)),
(('meta', 1),),
(('data', 0), ('meta', 2)),
(('data', 0), ('meta', 1), ('meta', 2)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(2 files, 2 linked, 0 removed, 0 errors)', info_lines)
def test_relink_existing_data_meta_ts_files(self):
self._relink_test((('data', 0), ('meta', 1), ('ts', 2)),
(('data', 0),),
(('ts', 2),),
(('data', 0), ('ts', 2),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(1 files, 1 linked, 0 removed, 0 errors)', info_lines)
def test_relink_existing_data_meta_older_ts_files(self):
self._relink_test((('data', 1), ('meta', 2)),
(('ts', 0),),
(('data', 1), ('meta', 2)),
(('ts', 0), ('data', 1), ('meta', 2)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(2 files, 2 linked, 0 removed, 0 errors)', info_lines)
def test_relink_data_meta_existing_ts_files(self):
self._relink_test((('data', 0), ('meta', 1), ('ts', 2)),
(('ts', 2),),
(('ts', 2),),
(('ts', 2),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(1 files, 0 linked, 0 removed, 0 errors)', info_lines)
def test_relink_data_meta_existing_newer_ts_files(self):
self._relink_test((('data', 0), ('meta', 1)),
(('ts', 2),),
(('data', 0), ('meta', 1)),
(('ts', 2),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(0 files, 0 linked, 0 removed, 0 errors)', info_lines)
def test_relink_ts_existing_newer_data_files(self):
self._relink_test((('ts', 0),),
(('data', 2),),
(('ts', 0),),
(('data', 2),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(0 files, 0 linked, 0 removed, 0 errors)', info_lines)
def test_relink_conflicting_ts_file(self):
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
self.rb.prepare_increase_partition_power()
self._save_ring()
self._do_link_test('relink',
(('ts', 0),),
None,
(('ts', 0),),
(('ts', 0),),
(('ts', 0),),
exp_ret_code=0)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual([], warning_lines)
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(1 files, 0 linked, 0 removed, 0 errors)',
info_lines)
def test_relink_link_already_exists_but_different_inode(self):
self.rb.prepare_increase_partition_power()
self._save_ring()
# make a file where we'd expect the link to be created
os.makedirs(self.expected_dir)
with open(self.expected_file, 'w'):
pass
# expect an error
with self._mock_relinker():
self.assertEqual(1, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
warning_lines = self.logger.get_lines_for_level('warning')
self.assertIn('Error relinking: failed to relink %s to %s: '
'[Errno 17] File exists'
% (self.objname, self.expected_file),
warning_lines[0])
self.assertIn('1 hash dirs processed (cleanup=False) '
'(1 files, 0 linked, 0 removed, 1 errors)',
warning_lines)
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_relink_link_already_exists(self):
self.rb.prepare_increase_partition_power()
self._save_ring()
orig_relink_paths = relink_paths
def mock_relink_paths(target_path, new_target_path, **kwargs):
# pretend another process has created the link before this one
os.makedirs(self.expected_dir)
os.link(target_path, new_target_path)
return orig_relink_paths(target_path, new_target_path,
**kwargs)
with self._mock_relinker():
with mock.patch('swift.cli.relinker.diskfile.relink_paths',
mock_relink_paths):
self.assertEqual(0, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
self.assertTrue(os.path.isdir(self.expected_dir))
self.assertTrue(os.path.isfile(self.expected_file))
stat_old = os.stat(os.path.join(self.objdir, self.object_fname))
stat_new = os.stat(self.expected_file)
self.assertEqual(stat_old.st_ino, stat_new.st_ino)
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(1 files, 0 linked, 0 removed, 0 errors)', info_lines)
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_relink_link_target_disappears(self):
        # we need object name in lower half of current part so that there is
        # no rehash of the new partition which would erase the empty new
        # partition - we want to assert it was created
self._setup_object(lambda part: part < 2 ** (PART_POWER - 1))
self.rb.prepare_increase_partition_power()
self._save_ring()
orig_relink_paths = relink_paths
def mock_relink_paths(target_path, new_target_path, **kwargs):
# pretend another process has cleaned up the target path
os.unlink(target_path)
return orig_relink_paths(target_path, new_target_path,
**kwargs)
with self._mock_relinker():
with mock.patch('swift.cli.relinker.diskfile.relink_paths',
mock_relink_paths):
self.assertEqual(0, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
self.assertTrue(os.path.isdir(self.expected_dir))
self.assertFalse(os.path.isfile(self.expected_file))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=False) '
'(1 files, 0 linked, 0 removed, 0 errors)', info_lines)
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_relink_no_applicable_policy(self):
# NB do not prepare part power increase
self._save_ring()
with self._mock_relinker():
self.assertEqual(2, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
]))
self.assertEqual(self.logger.get_lines_for_level('warning'),
['No policy found to increase the partition power.'])
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_relink_not_mounted(self):
self.rb.prepare_increase_partition_power()
self._save_ring()
with self._mock_relinker():
self.assertEqual(1, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
]))
self.assertEqual(self.logger.get_lines_for_level('warning'), [
'Skipping sda1 as it is not mounted',
'1 disks were unmounted',
'0 hash dirs processed (cleanup=False) '
'(0 files, 0 linked, 0 removed, 0 errors)',
])
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_relink_listdir_error(self):
self.rb.prepare_increase_partition_power()
self._save_ring()
with self._mock_relinker():
with self._mock_listdir():
self.assertEqual(1, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount-check'
]))
self.assertEqual(self.logger.get_lines_for_level('warning'), [
'Skipping %s because ' % self.objects,
'There were 1 errors listing partition directories',
'0 hash dirs processed (cleanup=False) '
'(0 files, 0 linked, 0 removed, 1 errors)',
])
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_relink_device_filter(self):
self.rb.prepare_increase_partition_power()
self._save_ring()
self.assertEqual(0, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
'--device', self.existing_device,
]))
self.assertTrue(os.path.isdir(self.expected_dir))
self.assertTrue(os.path.isfile(self.expected_file))
stat_old = os.stat(os.path.join(self.objdir, self.object_fname))
stat_new = os.stat(self.expected_file)
self.assertEqual(stat_old.st_ino, stat_new.st_ino)
def test_relink_device_filter_invalid(self):
self.rb.prepare_increase_partition_power()
self._save_ring()
self.assertEqual(0, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
'--device', 'none',
]))
self.assertFalse(os.path.isdir(self.expected_dir))
self.assertFalse(os.path.isfile(self.expected_file))
def test_relink_partition_filter(self):
# ensure partitions are in second quartile so that new partitions are
# not included in the relinked partitions when the relinker is re-run:
# this makes the number of partitions visited predictable (i.e. 3)
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
# create some other test files in different partitions
other_objs = []
used_parts = [self.part, self.part + 1]
for i in range(2):
_hash, part, next_part, obj = self._get_object_name(
lambda part:
part >= 2 ** (PART_POWER - 1) and part not in used_parts)
obj_dir = os.path.join(self.objects, str(part), _hash[-3:], _hash)
os.makedirs(obj_dir)
obj_file = os.path.join(obj_dir, self.object_fname)
with open(obj_file, 'w'):
pass
other_objs.append((part, obj_file))
used_parts.append(part)
self.rb.prepare_increase_partition_power()
self._save_ring()
# invalid partition
with mock.patch('sys.stdout'), mock.patch('sys.stderr'):
with self.assertRaises(SystemExit) as cm:
self.assertEqual(0, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
'--partition', '-1',
]))
self.assertEqual(2, cm.exception.code)
with mock.patch('sys.stdout'), mock.patch('sys.stderr'):
with self.assertRaises(SystemExit) as cm:
self.assertEqual(0, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
'--partition', 'abc',
]))
self.assertEqual(2, cm.exception.code)
# restrict to a partition with no test object
self.logger.clear()
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
'--partition', str(self.part + 1),
]))
self.assertFalse(os.path.isdir(self.expected_dir))
info_lines = self.logger.get_lines_for_level('info')
self.assertEqual(4, len(info_lines))
self.assertIn('Starting relinker (cleanup=False) using 1 workers:',
info_lines[0])
self.assertEqual(
['Processing files for policy platinum under %s (cleanup=False)'
% os.path.join(self.devices, 'sda1'),
'0 hash dirs processed (cleanup=False) (0 files, 0 linked, '
'0 removed, 0 errors)'], info_lines[1:3]
)
self.assertIn('Finished relinker (cleanup=False):',
info_lines[3])
self.assertEqual([], self.logger.get_lines_for_level('error'))
# restrict to one partition with a test object
self.logger.clear()
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
'--partition', str(self.part),
]))
self.assertTrue(os.path.isdir(self.expected_dir))
self.assertTrue(os.path.isfile(self.expected_file))
stat_old = os.stat(os.path.join(self.objdir, self.object_fname))
stat_new = os.stat(self.expected_file)
self.assertEqual(stat_old.st_ino, stat_new.st_ino)
info_lines = self.logger.get_lines_for_level('info')
self.assertEqual(5, len(info_lines))
self.assertIn('Starting relinker (cleanup=False) using 1 workers:',
info_lines[0])
self.assertEqual(
['Processing files for policy platinum under %s (cleanup=False)'
% os.path.join(self.devices, 'sda1'),
'Step: relink Device: sda1 Policy: platinum Partitions: 1/3',
'1 hash dirs processed (cleanup=False) (1 files, 1 linked, '
'0 removed, 0 errors)'], info_lines[1:4]
)
self.assertIn('Finished relinker (cleanup=False):',
info_lines[4])
self.assertEqual([], self.logger.get_lines_for_level('error'))
# restrict to two partitions with test objects
self.logger.clear()
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
'--partition', str(other_objs[0][0]),
'-p', str(other_objs[0][0]), # duplicates should be ignored
'-p', str(other_objs[1][0]),
]))
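        # utils.replace_partition_in_path(devices, path, part_power) is used
        # below to compute where each object lands under the new part power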
expected_file = utils.replace_partition_in_path(
self.devices, other_objs[0][1], PART_POWER + 1)
self.assertTrue(os.path.isfile(expected_file))
stat_old = os.stat(other_objs[0][1])
stat_new = os.stat(expected_file)
self.assertEqual(stat_old.st_ino, stat_new.st_ino)
expected_file = utils.replace_partition_in_path(
self.devices, other_objs[1][1], PART_POWER + 1)
self.assertTrue(os.path.isfile(expected_file))
stat_old = os.stat(other_objs[1][1])
stat_new = os.stat(expected_file)
self.assertEqual(stat_old.st_ino, stat_new.st_ino)
info_lines = self.logger.get_lines_for_level('info')
self.assertEqual(6, len(info_lines))
self.assertIn('Starting relinker (cleanup=False) using 1 workers:',
info_lines[0])
self.assertEqual(
['Processing files for policy platinum under %s (cleanup=False)'
% os.path.join(self.devices, 'sda1'),
'Step: relink Device: sda1 Policy: platinum Partitions: 2/3',
'Step: relink Device: sda1 Policy: platinum Partitions: 3/3',
'2 hash dirs processed (cleanup=False) (2 files, 2 linked, '
'0 removed, 0 errors)'], info_lines[1:5]
)
self.assertIn('Finished relinker (cleanup=False):',
info_lines[5])
self.assertEqual([], self.logger.get_lines_for_level('error'))
@patch_policies(
[StoragePolicy(0, name='gold', is_default=True),
ECStoragePolicy(1, name='platinum', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2)])
def test_relink_policy_option(self):
self._setup_object()
self.rb.prepare_increase_partition_power()
self._save_ring()
# invalid policy
with mock.patch('sys.stdout'), mock.patch('sys.stderr'):
with self.assertRaises(SystemExit) as cm:
relinker.main([
'relink',
'--swift-dir', self.testdir,
'--policy', '9',
'--skip-mount',
'--devices', self.devices,
'--device', self.existing_device,
])
self.assertEqual(2, cm.exception.code)
with mock.patch('sys.stdout'), mock.patch('sys.stderr'):
with self.assertRaises(SystemExit) as cm:
relinker.main([
'relink',
'--swift-dir', self.testdir,
'--policy', 'pewter',
'--skip-mount',
'--devices', self.devices,
'--device', self.existing_device,
])
self.assertEqual(2, cm.exception.code)
# policy with no object
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--policy', '1',
'--skip-mount',
'--devices', self.devices,
'--device', self.existing_device,
]))
self.assertFalse(os.path.isdir(self.expected_dir))
info_lines = self.logger.get_lines_for_level('info')
self.assertEqual(4, len(info_lines))
self.assertIn('Starting relinker (cleanup=False) using 1 workers:',
info_lines[0])
self.assertEqual(
['Processing files for policy platinum under %s/%s (cleanup=False)'
% (self.devices, self.existing_device),
'0 hash dirs processed (cleanup=False) (0 files, 0 linked, '
'0 removed, 0 errors)'], info_lines[1:3]
)
self.assertIn('Finished relinker (cleanup=False):',
info_lines[3])
self.assertEqual([], self.logger.get_lines_for_level('error'))
# policy with object
self.logger.clear()
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--policy', '0',
'--skip-mount',
'--devices', self.devices,
'--device', self.existing_device,
]))
self.assertTrue(os.path.isdir(self.expected_dir))
self.assertTrue(os.path.isfile(self.expected_file))
stat_old = os.stat(os.path.join(self.objdir, self.object_fname))
stat_new = os.stat(self.expected_file)
self.assertEqual(stat_old.st_ino, stat_new.st_ino)
info_lines = self.logger.get_lines_for_level('info')
self.assertEqual(5, len(info_lines))
self.assertIn('Starting relinker (cleanup=False) using 1 workers:',
info_lines[0])
self.assertEqual(
['Processing files for policy gold under %s/%s (cleanup=False)'
% (self.devices, self.existing_device),
'Step: relink Device: sda1 Policy: gold Partitions: 1/1',
'1 hash dirs processed (cleanup=False) (1 files, 1 linked, '
'0 removed, 0 errors)'], info_lines[1:4]
)
self.assertIn('Finished relinker (cleanup=False):',
info_lines[4])
self.assertEqual([], self.logger.get_lines_for_level('error'))
# policy name works, too
self.logger.clear()
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--policy', 'gold',
'--skip-mount',
'--devices', self.devices,
'--device', self.existing_device,
]))
self.assertTrue(os.path.isdir(self.expected_dir))
self.assertTrue(os.path.isfile(self.expected_file))
stat_old = os.stat(os.path.join(self.objdir, self.object_fname))
stat_new = os.stat(self.expected_file)
self.assertEqual(stat_old.st_ino, stat_new.st_ino)
info_lines = self.logger.get_lines_for_level('info')
self.assertEqual(4, len(info_lines))
self.assertIn('Starting relinker (cleanup=False) using 1 workers:',
info_lines[0])
self.assertEqual(
['Processing files for policy gold under %s/%s (cleanup=False)'
% (self.devices, self.existing_device),
'0 hash dirs processed (cleanup=False) '
'(0 files, 0 linked, 0 removed, 0 errors)'], info_lines[1:3]
)
self.assertIn('Finished relinker (cleanup=False):',
info_lines[3])
self.assertEqual([], self.logger.get_lines_for_level('error'))
@patch_policies(
[StoragePolicy(0, name='gold', is_default=True),
ECStoragePolicy(1, name='platinum', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2)])
def test_relink_all_policies(self):
# verify that only policies in appropriate state are processed
def do_relink(options=None):
options = [] if options is None else options
with self._mock_relinker():
with mock.patch(
'swift.cli.relinker.Relinker.process_policy') \
as mocked:
res = relinker.main([
'relink',
'--swift-dir', self.testdir,
'--skip-mount',
'--devices', self.devices,
'--device', self.existing_device,
] + options)
self.assertEqual([], self.logger.get_lines_for_level('error'))
return res, mocked
self._save_ring(POLICIES) # no ring prepared for increase
res, mocked = do_relink()
self.assertEqual([], mocked.call_args_list)
self.assertEqual(2, res)
self._save_ring([POLICIES[0]]) # not prepared for increase
self.rb.prepare_increase_partition_power()
self._save_ring([POLICIES[1]]) # prepared for increase
res, mocked = do_relink()
self.assertEqual([mock.call(POLICIES[1])], mocked.call_args_list)
self.assertEqual(0, res)
res, mocked = do_relink(['--policy', '0'])
self.assertEqual([], mocked.call_args_list)
self.assertEqual(2, res)
self._save_ring([POLICIES[0]]) # prepared for increase
res, mocked = do_relink()
self.assertEqual([mock.call(POLICIES[0]), mock.call(POLICIES[1])],
mocked.call_args_list)
self.assertEqual(0, res)
self.rb.increase_partition_power()
self._save_ring([POLICIES[0]]) # increased
res, mocked = do_relink()
self.assertEqual([mock.call(POLICIES[1])], mocked.call_args_list)
self.assertEqual(0, res)
self._save_ring([POLICIES[1]]) # increased
res, mocked = do_relink()
self.assertEqual([], mocked.call_args_list)
self.assertEqual(2, res)
res, mocked = do_relink(['--policy', '0'])
self.assertEqual([], mocked.call_args_list)
self.assertEqual(2, res)
self.rb.finish_increase_partition_power()
self._save_ring(POLICIES) # all rings finished
res, mocked = do_relink()
self.assertEqual([], mocked.call_args_list)
self.assertEqual(2, res)
def test_relink_conflicting_ts_is_linked_to_part_power(self):
# link from next partition to current partition;
# different file in current-1 partition
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
self.rb.prepare_increase_partition_power()
self._save_ring()
filename = '.'.join([self.obj_ts.internal, 'ts'])
new_filepath = os.path.join(self.expected_dir, filename)
old_filepath = os.path.join(self.objdir, filename)
        # set up a file in the current-1 part power (PART_POWER - 1) location
# that is *not* linked to the file in the next part power location
older_filepath = utils.replace_partition_in_path(
self.devices, new_filepath, PART_POWER - 1)
os.makedirs(os.path.dirname(older_filepath))
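        # the file content records the path it was written at, so reading a
        # (possibly hard-linked) path later shows which original file it is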
with open(older_filepath, 'w') as fd:
fd.write(older_filepath)
self._do_link_test('relink',
(('ts', 0),),
(('ts', 0),),
None,
(('ts', 0),),
(('ts', 0),),
exp_ret_code=0)
info_lines = self.logger.get_lines_for_level('info')
# both the PART_POWER and PART_POWER - N partitions are visited, no new
# links are created, and both the older files are retained
self.assertIn('2 hash dirs processed (cleanup=False) '
'(2 files, 0 linked, 0 removed, 0 errors)',
info_lines)
with open(new_filepath, 'r') as fd:
self.assertEqual(old_filepath, fd.read())
self.assertTrue(os.path.exists(older_filepath))
def test_relink_conflicting_ts_is_linked_to_part_power_minus_1(self):
# link from next partition to current-1 partition;
# different file in current partition
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
self.rb.prepare_increase_partition_power()
self._save_ring()
        # set up a file in the next part power (PART_POWER + 1) location
        # that is linked to a file in an older (PART_POWER - 1) location
filename = '.'.join([self.obj_ts.internal, 'ts'])
older_filepath, new_filepath = self._make_link(filename,
PART_POWER - 1)
self._do_link_test('relink',
(('ts', 0),),
None,
None, # we already made file linked to older part
(('ts', 0),), # retained
(('ts', 0),),
exp_ret_code=0)
info_lines = self.logger.get_lines_for_level('info')
# both the PART_POWER and PART_POWER - N partitions are visited, no new
# links are created, and both the older files are retained
self.assertIn('2 hash dirs processed (cleanup=False) '
'(2 files, 0 linked, 0 removed, 0 errors)',
info_lines)
with open(new_filepath, 'r') as fd:
self.assertEqual(older_filepath, fd.read())
# prev part power file is retained because it is link target
self.assertTrue(os.path.exists(older_filepath))
def test_relink_conflicting_ts_is_linked_to_part_power_minus_2_err(self):
# link from next partition to current-2 partition;
# different file in current partition
# by default the relinker will NOT validate the current-2 location
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
self.rb.prepare_increase_partition_power()
self._save_ring()
        # set up a file in the next part power (PART_POWER + 1) location
        # that is linked to a file in an older (PART_POWER - 2) location
filename = '.'.join([self.obj_ts.internal, 'ts'])
older_filepath, new_filepath = self._make_link(filename,
PART_POWER - 2)
self._do_link_test('relink',
(('ts', 0),),
None,
None, # we already made file linked to older part
(('ts', 0),), # retained
(('ts', 0),),
exp_ret_code=0)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual([], warning_lines)
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('2 hash dirs processed (cleanup=False) '
'(2 files, 0 linked, 0 removed, 0 errors)',
info_lines)
with open(new_filepath, 'r') as fd:
self.assertEqual(older_filepath, fd.read())
# prev-1 part power file is always retained because it is link target
self.assertTrue(os.path.exists(older_filepath))
def test_relink_conflicting_ts_both_in_older_part_powers(self):
# link from next partition to current-1 partition;
# different file in current partition
# different file in current-2 location
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 2))
self.rb.prepare_increase_partition_power()
self._save_ring()
        # set up a file in the next part power (PART_POWER + 1) location
        # that is linked to a file in an older (PART_POWER - 1) location
filename = '.'.join([self.obj_ts.internal, 'ts'])
older_filepath, new_filepath = self._make_link(filename,
PART_POWER - 1)
        # set up a file in an even older part power (PART_POWER - 2) location
# that is *not* linked to the file in the next part power location
oldest_filepath = utils.replace_partition_in_path(
self.devices, new_filepath, PART_POWER - 2)
os.makedirs(os.path.dirname(oldest_filepath))
with open(oldest_filepath, 'w') as fd:
fd.write(oldest_filepath)
self._do_link_test('relink',
(('ts', 0),),
None,
None, # we already made file linked to older part
(('ts', 0),), # retained
(('ts', 0),),
exp_ret_code=0)
info_lines = self.logger.get_lines_for_level('info')
# both the PART_POWER and PART_POWER - N partitions are visited, no new
# links are created, and both the older files are retained
self.assertIn('3 hash dirs processed (cleanup=False) '
'(3 files, 0 linked, 0 removed, 0 errors)',
info_lines)
with open(new_filepath, 'r') as fd:
self.assertEqual(older_filepath, fd.read())
self.assertTrue(os.path.exists(older_filepath)) # linked so retained
self.assertTrue(os.path.exists(oldest_filepath)) # retained anyway
@patch_policies(
[StoragePolicy(0, name='gold', is_default=True),
ECStoragePolicy(1, name='platinum', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2)])
def test_cleanup_all_policies(self):
# verify that only policies in appropriate state are processed
def do_cleanup(options=None):
options = [] if options is None else options
with mock.patch(
'swift.cli.relinker.Relinker.process_policy') as mocked:
res = relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--skip-mount',
'--devices', self.devices,
'--device', self.existing_device,
] + options)
return res, mocked
self._save_ring(POLICIES) # no ring prepared for increase
res, mocked = do_cleanup()
self.assertEqual([], mocked.call_args_list)
self.assertEqual(2, res)
self.rb.prepare_increase_partition_power()
self._save_ring(POLICIES) # all rings prepared for increase
res, mocked = do_cleanup()
self.assertEqual([], mocked.call_args_list)
self.assertEqual(2, res)
self.rb.increase_partition_power()
self._save_ring([POLICIES[0]]) # increased
res, mocked = do_cleanup()
self.assertEqual([mock.call(POLICIES[0])], mocked.call_args_list)
self.assertEqual(0, res)
res, mocked = do_cleanup(['--policy', '1'])
self.assertEqual([], mocked.call_args_list)
self.assertEqual(2, res)
self._save_ring([POLICIES[1]]) # increased
res, mocked = do_cleanup()
self.assertEqual([mock.call(POLICIES[0]), mock.call(POLICIES[1])],
mocked.call_args_list)
self.assertEqual(0, res)
self.rb.finish_increase_partition_power()
self._save_ring([POLICIES[1]]) # finished
res, mocked = do_cleanup()
self.assertEqual([mock.call(POLICIES[0])], mocked.call_args_list)
self.assertEqual(0, res)
self._save_ring([POLICIES[0]]) # finished
res, mocked = do_cleanup()
self.assertEqual([], mocked.call_args_list)
self.assertEqual(2, res)
res, mocked = do_cleanup(['--policy', '1'])
self.assertEqual([], mocked.call_args_list)
self.assertEqual(2, res)
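    # helper used by the cleanup tests below: prepare the ring for a part
    # power increase, optionally run the relink step, then increase the
    # part power and re-save the ring so that 'cleanup' is the next step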
def _common_test_cleanup(self, relink=True):
# Create a ring that has prev_part_power set
self.rb.prepare_increase_partition_power()
self._save_ring()
if relink:
conf = {'swift_dir': self.testdir,
'devices': self.devices,
'mount_check': False,
'files_per_second': 0,
'policies': POLICIES,
'recon_cache_path': self.recon_cache_path,
'workers': 0}
self.assertEqual(0, relinker.Relinker(
conf, logger=self.logger, device_list=[self.existing_device],
do_cleanup=False).run())
self.rb.increase_partition_power()
self._save_ring()
def _cleanup_test(self, old_file_specs, new_file_specs,
conflict_file_specs, exp_old_specs, exp_new_specs,
exp_ret_code=0, relink_errors=None):
# force the new partitions to be greater than the median so that they
# are not rehashed during cleanup, meaning we can inspect the outcome
# of the cleanup relinks and removes
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
self.rb.prepare_increase_partition_power()
self.rb.increase_partition_power()
self._save_ring()
self._do_link_test('cleanup', old_file_specs, new_file_specs,
conflict_file_specs, exp_old_specs, exp_new_specs,
exp_ret_code, relink_errors)
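    # In the cleanup tests, conflict_file_specs describe files that already
    # exist in the new location under a different inode, i.e. files the
    # relinker cannot treat as its own earlier hard links (see the
    # "different inode" notes in the tests below).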
def test_cleanup_data_meta_files(self):
self._cleanup_test((('data', 0), ('meta', 1)),
(('data', 0), ('meta', 1)),
None,
None,
(('data', 0), ('meta', 1)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(2 files, 0 linked, 2 removed, 0 errors)',
info_lines)
def test_cleanup_missing_data_file(self):
self._cleanup_test((('data', 0),),
None,
None,
None,
(('data', 0),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 1 linked, 1 removed, 0 errors)',
info_lines)
def test_cleanup_missing_data_missing_meta_files(self):
self._cleanup_test((('data', 0), ('meta', 1)),
None,
None,
None,
(('data', 0), ('meta', 1)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(2 files, 2 linked, 2 removed, 0 errors)',
info_lines)
def test_cleanup_missing_meta_file(self):
self._cleanup_test((('meta', 0),),
None,
None,
None,
(('meta', 0),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 1 linked, 1 removed, 0 errors)',
info_lines)
def test_cleanup_missing_ts_file(self):
self._cleanup_test((('ts', 0),),
None,
None,
None,
(('ts', 0),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 1 linked, 1 removed, 0 errors)',
info_lines)
def test_cleanup_missing_data_missing_meta_missing_ts_files(self):
self._cleanup_test((('data', 0), ('meta', 1), ('ts', 2)),
None,
None,
None,
(('ts', 2),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 1 linked, 1 removed, 0 errors)',
info_lines)
def test_cleanup_missing_data_missing_ts_missing_meta_files(self):
self._cleanup_test((('data', 0), ('ts', 1), ('meta', 2)),
None,
None,
None,
(('ts', 1), ('meta', 2)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(2 files, 2 linked, 2 removed, 0 errors)',
info_lines)
def test_cleanup_missing_ts_missing_data_missing_meta_files(self):
self._cleanup_test((('ts', 0), ('data', 1), ('meta', 2)),
None,
None,
None,
(('data', 1), ('meta', 2)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(2 files, 2 linked, 2 removed, 0 errors)',
info_lines)
def test_cleanup_missing_data_missing_data_missing_meta_files(self):
self._cleanup_test((('data', 0), ('data', 1), ('meta', 2)),
None,
None,
None,
(('data', 1), ('meta', 2)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(2 files, 2 linked, 2 removed, 0 errors)',
info_lines)
def test_cleanup_missing_data_existing_meta_files(self):
self._cleanup_test((('data', 0), ('meta', 1)),
(('meta', 1),),
None,
None,
(('data', 0), ('meta', 1)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(2 files, 1 linked, 2 removed, 0 errors)',
info_lines)
def test_cleanup_missing_meta_existing_newer_data_files(self):
self._cleanup_test((('data', 0), ('meta', 2)),
(('data', 1),),
None,
None,
(('data', 1), ('meta', 2)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 1 linked, 2 removed, 0 errors)',
info_lines)
def test_cleanup_missing_data_missing_meta_existing_older_meta_files(self):
self._cleanup_test((('data', 0), ('meta', 2)),
(('meta', 1),),
None,
None,
(('data', 0), ('meta', 2)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(2 files, 2 linked, 2 removed, 0 errors)',
info_lines)
def test_cleanup_missing_meta_missing_ts_files(self):
self._cleanup_test((('data', 0), ('meta', 1), ('ts', 2)),
(('data', 0),),
None,
None,
(('ts', 2),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 1 linked, 1 removed, 0 errors)',
info_lines)
def test_cleanup_missing_data_missing_meta_existing_older_ts_files(self):
self._cleanup_test((('data', 1), ('meta', 2)),
(('ts', 0),),
None,
None,
(('data', 1), ('meta', 2)))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(2 files, 2 linked, 2 removed, 0 errors)',
info_lines)
def test_cleanup_data_meta_existing_ts_files(self):
self._cleanup_test((('data', 0), ('meta', 1), ('ts', 2)),
(('ts', 2),),
None,
None,
(('ts', 2),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 0 linked, 1 removed, 0 errors)',
info_lines)
def test_cleanup_data_meta_existing_newer_ts_files(self):
self._cleanup_test((('data', 0), ('meta', 1)),
(('ts', 2),),
None,
None,
(('ts', 2),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(0 files, 0 linked, 2 removed, 0 errors)',
info_lines)
def test_cleanup_ts_existing_newer_data_files(self):
self._cleanup_test((('ts', 0),),
(('data', 2),),
None,
None,
(('data', 2),))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(0 files, 0 linked, 1 removed, 0 errors)',
info_lines)
def test_cleanup_missing_data_file_relink_fails(self):
self._cleanup_test((('data', 0), ('meta', 1)),
(('meta', 1),),
None,
(('data', 0), ('meta', 1)), # nothing is removed
(('meta', 1),),
exp_ret_code=1,
relink_errors={'data': OSError(errno.EPERM, 'oops')}
)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(2 files, 0 linked, 0 removed, 1 errors)',
warning_lines)
def test_cleanup_missing_meta_file_relink_fails(self):
self._cleanup_test((('data', 0), ('meta', 1)),
(('data', 0),),
None,
(('data', 0), ('meta', 1)), # nothing is removed
(('data', 0),),
exp_ret_code=1,
relink_errors={'meta': OSError(errno.EPERM, 'oops')}
)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(2 files, 0 linked, 0 removed, 1 errors)',
warning_lines)
def test_cleanup_missing_data_and_meta_file_one_relink_fails(self):
self._cleanup_test((('data', 0), ('meta', 1)),
None,
None,
(('data', 0), ('meta', 1)), # nothing is removed
(('data', 0),),
exp_ret_code=1,
relink_errors={'meta': OSError(errno.EPERM, 'oops')}
)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(2 files, 1 linked, 0 removed, 1 errors)',
warning_lines)
def test_cleanup_missing_data_and_meta_file_both_relinks_fails(self):
self._cleanup_test((('data', 0), ('meta', 1)),
None,
None,
(('data', 0), ('meta', 1)), # nothing is removed
None,
exp_ret_code=1,
relink_errors={'data': OSError(errno.EPERM, 'oops'),
'meta': OSError(errno.EPERM, 'oops')}
)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(2 files, 0 linked, 0 removed, 2 errors)',
warning_lines)
def test_cleanup_conflicting_data_file(self):
self._cleanup_test((('data', 0),),
None,
(('data', 0),), # different inode
(('data', 0),),
(('data', 0),),
exp_ret_code=1)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 0 linked, 0 removed, 1 errors)',
warning_lines)
def test_cleanup_conflicting_ts_file(self):
self._cleanup_test((('ts', 0),),
None,
(('ts', 0),), # different inode but same timestamp
None,
(('ts', 0),),
exp_ret_code=0)
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 0 linked, 1 removed, 0 errors)',
info_lines)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual([], warning_lines)
def test_cleanup_conflicting_ts_is_linked_to_part_power_minus_1(self):
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
self.rb.prepare_increase_partition_power()
self.rb.increase_partition_power()
self._save_ring()
        # set up a file in the next part power (PART_POWER + 1) location
        # that is linked to a file in an older PART_POWER - 1 location
filename = '.'.join([self.obj_ts.internal, 'ts'])
older_filepath, new_filepath = self._make_link(filename,
PART_POWER - 1)
self._do_link_test('cleanup',
(('ts', 0),),
None,
None, # we already made file linked to older part
None,
(('ts', 0),),
exp_ret_code=0)
info_lines = self.logger.get_lines_for_level('info')
# both the PART_POWER and PART_POWER - N partitions are visited, no new
# links are created, and both the older files are removed
self.assertIn('2 hash dirs processed (cleanup=True) '
'(2 files, 0 linked, 2 removed, 0 errors)',
info_lines)
with open(new_filepath, 'r') as fd:
self.assertEqual(older_filepath, fd.read())
self.assertFalse(os.path.exists(older_filepath))
def test_cleanup_conflicting_ts_is_linked_to_part_power_minus_2_err(self):
# link from next partition to current-2 partition;
# different file in current partition
# by default the relinker will NOT validate the current-2 location
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
self.rb.prepare_increase_partition_power()
self.rb.increase_partition_power()
self._save_ring()
        # set up a file in the next part power (PART_POWER + 1) location
        # that is linked to a file in an older (PART_POWER - 2) location
filename = '.'.join([self.obj_ts.internal, 'ts'])
older_filepath, new_filepath = self._make_link(filename,
PART_POWER - 2)
self._do_link_test('cleanup',
(('ts', 0),),
None,
None, # we already made file linked to older part
None, # different inode but same timestamp: removed
(('ts', 0),),
exp_ret_code=0)
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('2 hash dirs processed (cleanup=True) '
'(2 files, 0 linked, 2 removed, 0 errors)',
info_lines)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual([], warning_lines)
with open(new_filepath, 'r') as fd:
self.assertEqual(older_filepath, fd.read())
# current-2 is linked so can be removed in cleanup
self.assertFalse(os.path.exists(older_filepath))
def test_cleanup_conflicting_ts_is_linked_to_part_power_minus_2_ok(self):
# link from next partition to current-2 partition;
# different file in current partition
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
self.rb.prepare_increase_partition_power()
self.rb.increase_partition_power()
self._save_ring()
        # set up a file in the next part power (PART_POWER + 1) location
        # that is linked to a file in an older (PART_POWER - 2) location
filename = '.'.join([self.obj_ts.internal, 'ts'])
older_filepath, new_filepath = self._make_link(filename,
PART_POWER - 2)
self._do_link_test('cleanup',
(('ts', 0),),
None,
None, # we already made file linked to older part
None,
(('ts', 0),),
exp_ret_code=0)
info_lines = self.logger.get_lines_for_level('info')
# both the PART_POWER and PART_POWER - N partitions are visited, no new
# links are created, and both the older files are removed
self.assertIn('2 hash dirs processed (cleanup=True) '
'(2 files, 0 linked, 2 removed, 0 errors)',
info_lines)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual([], warning_lines)
with open(new_filepath, 'r') as fd:
self.assertEqual(older_filepath, fd.read())
self.assertFalse(os.path.exists(older_filepath))
def test_cleanup_conflicting_older_data_file(self):
# older conflicting file isn't relevant so cleanup succeeds
self._cleanup_test((('data', 0),),
(('data', 1),),
(('data', 0),), # different inode
None,
(('data', 1),), # cleanup_ondisk_files rm'd 0.data
exp_ret_code=0)
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(0 files, 0 linked, 1 removed, 0 errors)',
info_lines)
def test_cleanup_conflicting_data_file_conflicting_meta_file(self):
self._cleanup_test((('data', 0), ('meta', 1)),
None,
(('data', 0), ('meta', 1)), # different inodes
(('data', 0), ('meta', 1)),
(('data', 0), ('meta', 1)),
exp_ret_code=1)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(2 files, 0 linked, 0 removed, 2 errors)',
warning_lines)
def test_cleanup_conflicting_data_file_existing_meta_file(self):
# if just one link fails to be created then *nothing* is removed from
# old dir
self._cleanup_test((('data', 0), ('meta', 1)),
(('meta', 1),),
(('data', 0),), # different inode
(('data', 0), ('meta', 1)),
(('data', 0), ('meta', 1)),
exp_ret_code=1)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(2 files, 0 linked, 0 removed, 1 errors)',
warning_lines)
def test_cleanup_first_quartile_does_rehash(self):
# we need object name in lower half of current part
self._setup_object(lambda part: part < 2 ** (PART_POWER - 1))
self.assertLess(self.next_part, 2 ** PART_POWER)
self._common_test_cleanup()
# don't mock re-hash for variety (and so we can assert side-effects)
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
# Old objectname should be removed, new should still exist
self.assertTrue(os.path.isdir(self.expected_dir))
self.assertTrue(os.path.isfile(self.expected_file))
self.assertFalse(os.path.isfile(
os.path.join(self.objdir, self.object_fname)))
self.assertFalse(os.path.exists(self.part_dir))
with open(os.path.join(self.next_part_dir, 'hashes.invalid')) as fp:
self.assertEqual(fp.read(), '')
with open(os.path.join(self.next_part_dir, 'hashes.pkl'), 'rb') as fp:
hashes = pickle.load(fp)
self.assertIn(self._hash[-3:], hashes)
# create an object in a first quartile partition and pretend it should
# be there; check that cleanup does not fail and does not remove the
# partition!
self._setup_object(lambda part: part < 2 ** (PART_POWER - 1))
with mock.patch('swift.cli.relinker.replace_partition_in_path',
lambda *args, **kwargs: args[1]):
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
self.assertTrue(os.path.exists(self.objname))
def test_cleanup_second_quartile_no_rehash(self):
# we need a part in upper half of current part power
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
self.assertGreaterEqual(self.part, 2 ** (PART_POWER - 1))
self._common_test_cleanup()
def fake_hash_suffix(suffix_dir, policy):
# check that the hash dir is empty and remove it just like the
# real _hash_suffix
self.assertEqual([self._hash], os.listdir(suffix_dir))
hash_dir = os.path.join(suffix_dir, self._hash)
self.assertEqual([], os.listdir(hash_dir))
os.rmdir(hash_dir)
os.rmdir(suffix_dir)
raise PathNotDir()
with mock.patch('swift.obj.diskfile.DiskFileManager._hash_suffix',
side_effect=fake_hash_suffix) as mock_hash_suffix:
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
# the old suffix dir is rehashed before the old partition is removed,
# but the new suffix dir is not rehashed
self.assertEqual([mock.call(self.suffix_dir, policy=self.policy)],
mock_hash_suffix.call_args_list)
# Old objectname should be removed, new should still exist
self.assertTrue(os.path.isdir(self.expected_dir))
self.assertTrue(os.path.isfile(self.expected_file))
self.assertFalse(os.path.isfile(
os.path.join(self.objdir, self.object_fname)))
self.assertFalse(os.path.exists(self.part_dir))
with open(os.path.join(self.objects, str(self.next_part),
'hashes.invalid')) as fp:
self.assertEqual(fp.read(), '')
with open(os.path.join(self.objects, str(self.next_part),
'hashes.pkl'), 'rb') as fp:
hashes = pickle.load(fp)
self.assertIn(self._hash[-3:], hashes)
def test_cleanup_no_applicable_policy(self):
# NB do not prepare part power increase
self._save_ring()
with self._mock_relinker():
self.assertEqual(2, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
]))
self.assertEqual(self.logger.get_lines_for_level('warning'),
['No policy found to increase the partition power.'])
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_not_mounted(self):
self._common_test_cleanup()
with self._mock_relinker():
self.assertEqual(1, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
]))
self.assertEqual(self.logger.get_lines_for_level('warning'), [
'Skipping sda1 as it is not mounted',
'1 disks were unmounted',
'0 hash dirs processed (cleanup=True) '
'(0 files, 0 linked, 0 removed, 0 errors)',
])
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_listdir_error(self):
self._common_test_cleanup()
with self._mock_relinker():
with self._mock_listdir():
self.assertEqual(1, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount-check'
]))
self.assertEqual(self.logger.get_lines_for_level('warning'), [
'Skipping %s because ' % self.objects,
'There were 1 errors listing partition directories',
'0 hash dirs processed (cleanup=True) '
'(0 files, 0 linked, 0 removed, 1 errors)',
])
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_device_filter(self):
self._common_test_cleanup()
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
'--device', self.existing_device,
]))
# Old objectname should be removed, new should still exist
self.assertTrue(os.path.isdir(self.expected_dir))
self.assertTrue(os.path.isfile(self.expected_file))
self.assertFalse(os.path.isfile(
os.path.join(self.objdir, self.object_fname)))
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_device_filter_invalid(self):
self._common_test_cleanup()
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
'--device', 'none',
]))
# Old objectname should still exist, new should still exist
self.assertTrue(os.path.isdir(self.expected_dir))
self.assertTrue(os.path.isfile(self.expected_file))
self.assertTrue(os.path.isfile(
os.path.join(self.objdir, self.object_fname)))
self.assertEqual([], self.logger.get_lines_for_level('error'))
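    # _time_iter (below) yields `start` once and then `start + 1` forever,
    # giving deterministic elapsed times when it is used to mock time.time()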
def _time_iter(self, start):
yield start
while True:
yield start + 1
@patch_policies(
[StoragePolicy(0, 'platinum', True),
ECStoragePolicy(
1, name='ec', is_default=False, ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2)])
@mock.patch('os.getpid', return_value=100)
def test_relink_cleanup(self, mock_getpid):
        # set up a policy-0 object in a part in the second quartile so that
        # its next part *will not* be handled during cleanup
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
        # create a policy-1 object in a part in the first quartile so that
        # its next part *will* be handled during cleanup
_hash, pol_1_part, pol_1_next_part, objpath = self._get_object_name(
lambda part: part < 2 ** (PART_POWER - 1))
self._create_object(POLICIES[1], pol_1_part, _hash)
state_files = {
POLICIES[0]: os.path.join(self.devices, self.existing_device,
'relink.objects.json'),
POLICIES[1]: os.path.join(self.devices, self.existing_device,
'relink.objects-1.json'),
}
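        # per-device progress files: the policy-0 datadir uses the bare
        # 'objects' name while policy 1 appends its index ('objects-1')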
self.rb.prepare_increase_partition_power()
self._save_ring()
ts1 = time.time()
with mock.patch('time.time', side_effect=self._time_iter(ts1)):
self.assertEqual(0, relinker.main([
'relink',
self.conf_file,
]))
orig_inodes = {}
for policy, part in zip(POLICIES,
(self.part, pol_1_part)):
state_file = state_files[policy]
orig_inodes[policy] = os.stat(state_file).st_ino
state = {str(part): True}
with open(state_files[policy], 'rt') as f:
self.assertEqual(json.load(f), {
"part_power": PART_POWER,
"next_part_power": PART_POWER + 1,
"state": state})
recon_progress = utils.load_recon_cache(self.recon_cache)
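        # the recon cache holds per-device aggregate stats, a per-policy
        # breakdown of the relink step, and a per-worker entry keyed by the
        # (mocked) pid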
expected_recon_data = {
'devices': {'sda1': {'parts_done': 2,
'policies': {'0': {
'next_part_power': PART_POWER + 1,
'part_power': PART_POWER,
'parts_done': 1,
'start_time': mock.ANY,
'stats': {'errors': 0,
'files': 1,
'hash_dirs': 1,
'linked': 1,
'removed': 0},
'step': 'relink',
'timestamp': mock.ANY,
'total_parts': 1,
'total_time': 0.0},
'1': {
'next_part_power': PART_POWER + 1,
'part_power': PART_POWER,
'parts_done': 1,
'start_time': mock.ANY,
'stats': {
'errors': 0,
'files': 1,
'hash_dirs': 1,
'linked': 1,
'removed': 0},
'step': 'relink',
'timestamp': mock.ANY,
'total_parts': 1,
'total_time': 0.0}},
'start_time': mock.ANY,
'stats': {'errors': 0,
'files': 2,
'hash_dirs': 2,
'linked': 2,
'removed': 0},
'timestamp': mock.ANY,
'total_parts': 2,
'total_time': 0}},
'workers': {'100': {'devices': ['sda1'],
'return_code': 0,
'timestamp': mock.ANY}}}
self.assertEqual(recon_progress, expected_recon_data)
self.rb.increase_partition_power()
self.rb._ring = None # Force builder to reload ring
self._save_ring()
with open(state_files[0], 'rt'), open(state_files[1], 'rt'):
# Keep the state files open during cleanup so the inode can't be
# released/re-used when it gets unlinked
self.assertEqual(orig_inodes[0], os.stat(state_files[0]).st_ino)
self.assertEqual(orig_inodes[1], os.stat(state_files[1]).st_ino)
ts1 = time.time()
with mock.patch('time.time', side_effect=self._time_iter(ts1)):
self.assertEqual(0, relinker.main([
'cleanup',
self.conf_file,
]))
self.assertNotEqual(orig_inodes[0], os.stat(state_files[0]).st_ino)
self.assertNotEqual(orig_inodes[1], os.stat(state_files[1]).st_ino)
for policy, part, next_part in zip(POLICIES,
(self.part, pol_1_part),
(None, pol_1_next_part)):
state_file = state_files[policy]
state = {str(part): True}
if next_part is not None:
# cleanup will process the new partition as well as the old if
# old is in first quartile
state[str(next_part)] = True
with open(state_file, 'rt') as f:
# NB: part_power/next_part_power tuple changed, so state was
# reset (though we track prev_part_power for an efficient clean
# up)
self.assertEqual(json.load(f), {
"prev_part_power": PART_POWER,
"part_power": PART_POWER + 1,
"next_part_power": PART_POWER + 1,
"state": state})
recon_progress = utils.load_recon_cache(self.recon_cache)
expected_recon_data = {
'devices': {'sda1': {'parts_done': 3,
'policies': {'0': {
'next_part_power': PART_POWER + 1,
'part_power': PART_POWER + 1,
'parts_done': 1,
'start_time': mock.ANY,
'stats': {'errors': 0,
'files': 1,
'hash_dirs': 1,
'linked': 0,
'removed': 1},
'step': 'cleanup',
'timestamp': mock.ANY,
'total_parts': 1,
'total_time': 0.0},
'1': {
'next_part_power': PART_POWER + 1,
'part_power': PART_POWER + 1,
'parts_done': 2,
'start_time': mock.ANY,
'stats': {
'errors': 0,
'files': 1,
'hash_dirs': 1,
'linked': 0,
'removed': 1},
'step': 'cleanup',
'timestamp': mock.ANY,
'total_parts': 2,
'total_time': 0.0}},
'start_time': mock.ANY,
'stats': {'errors': 0,
'files': 2,
'hash_dirs': 2,
'linked': 0,
'removed': 2},
'timestamp': mock.ANY,
'total_parts': 3,
'total_time': 0}},
'workers': {'100': {'devices': ['sda1'],
'return_code': 0,
'timestamp': mock.ANY}}}
self.assertEqual(recon_progress, expected_recon_data)
def test_devices_filter_filtering(self):
# With no filtering, returns all devices
r = relinker.Relinker(
{'devices': self.devices,
'recon_cache_path': self.recon_cache_path},
self.logger, self.existing_device)
devices = r.devices_filter("", [self.existing_device])
self.assertEqual(set([self.existing_device]), devices)
# With a matching filter, returns what is matching
devices = r.devices_filter("", [self.existing_device, 'sda2'])
self.assertEqual(set([self.existing_device]), devices)
# With a non matching filter, returns nothing
r.device_list = ['none']
devices = r.devices_filter("", [self.existing_device])
self.assertEqual(set(), devices)
def test_hook_pre_post_device_locking(self):
r = relinker.Relinker(
{'devices': self.devices,
'recon_cache_path': self.recon_cache_path},
self.logger, self.existing_device)
device_path = os.path.join(self.devices, self.existing_device)
r.datadir = 'object' # would get set in process_policy
r.states = {"state": {}, "part_power": PART_POWER,
"next_part_power": PART_POWER + 1} # ditto
lock_file = os.path.join(device_path, '.relink.%s.lock' % r.datadir)
r.policy = self.policy
# The first run gets the lock
r.hook_pre_device(device_path)
self.assertIsNotNone(r.dev_lock)
# A following run would block
with self.assertRaises(IOError) as raised:
with open(lock_file, 'a') as f:
fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
self.assertEqual(errno.EAGAIN, raised.exception.errno)
        # hook_post_device releases the device lock
r.hook_post_device(device_path)
self.assertIsNone(r.dev_lock)
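        # with the lock released, taking the flock again succeeds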
with open(lock_file, 'a') as f:
fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
def _test_state_file(self, pol, expected_recon_data):
r = relinker.Relinker(
{'devices': self.devices,
'recon_cache_path': self.recon_cache_path,
'stats_interval': 0.0},
self.logger, [self.existing_device])
device_path = os.path.join(self.devices, self.existing_device)
r.datadir = 'objects'
r.part_power = PART_POWER
r.next_part_power = PART_POWER + 1
datadir_path = os.path.join(device_path, r.datadir)
state_file = os.path.join(device_path, 'relink.%s.json' % r.datadir)
r.policy = pol
r.pid = 1234 # for recon workers stats
recon_progress = utils.load_recon_cache(self.recon_cache)
# the progress for the current policy should be gone. So we should
        # just have anything from any other processed policies, if any
self.assertEqual(recon_progress, expected_recon_data)
# Start relinking
r.states = {
"part_power": PART_POWER,
"next_part_power": PART_POWER + 1,
"state": {},
}
# Load the states: As it starts, it must be empty
r.hook_pre_device(device_path)
self.assertEqual({}, r.states["state"])
os.close(r.dev_lock) # Release the lock
# Partition 312 is ignored because it must have been created with the
# next_part_power, so it does not need to be relinked
# 96 and 227 are reverse ordered
        # auditor_status.json is ignored because it's not a partition
self.assertEqual(['227', '96'], r.partitions_filter(
"", ['96', '227', '312', 'auditor_status.json']))
self.assertEqual(r.states["state"], {'96': False, '227': False})
r.diskfile_mgr = DiskFileRouter({
'devices': self.devices,
'mount_check': False,
}, self.logger)[r.policy]
# Ack partition 96
r.hook_pre_partition(os.path.join(datadir_path, '96'))
r.hook_post_partition(os.path.join(datadir_path, '96'))
self.assertEqual(r.states["state"], {'96': True, '227': False})
self.assertEqual(self.logger.get_lines_for_level("info"), [
"Step: relink Device: sda1 Policy: %s "
"Partitions: 1/2" % r.policy.name,
])
with open(state_file, 'rt') as f:
self.assertEqual(json.load(f), {
"part_power": PART_POWER,
"next_part_power": PART_POWER + 1,
"state": {'96': True, '227': False}})
recon_progress = utils.load_recon_cache(self.recon_cache)
expected_recon_data.update(
{'devices': {
'sda1': {
'parts_done': 1,
'policies': {
str(pol.idx): {
'next_part_power': PART_POWER + 1,
'part_power': PART_POWER,
'parts_done': 1,
'start_time': mock.ANY,
'stats': {
'errors': 0,
'files': 0,
'hash_dirs': 0,
'linked': 0,
'removed': 0},
'step': 'relink',
'timestamp': mock.ANY,
'total_parts': 2}},
'start_time': mock.ANY,
'stats': {
'errors': 0,
'files': 0,
'hash_dirs': 0,
'linked': 0,
'removed': 0},
'timestamp': mock.ANY,
'total_parts': 2,
'total_time': 0}},
'workers': {
'1234': {'timestamp': mock.ANY,
'return_code': None,
'devices': ['sda1']}}})
self.assertEqual(recon_progress, expected_recon_data)
# Restart relinking after only part 96 was done
self.logger.clear()
self.assertEqual(['227'],
r.partitions_filter("", ['96', '227', '312']))
self.assertEqual(r.states["state"], {'96': True, '227': False})
# ...but there's an error
r.hook_pre_partition(os.path.join(datadir_path, '227'))
r.stats['errors'] += 1
r.hook_post_partition(os.path.join(datadir_path, '227'))
self.assertEqual(self.logger.get_lines_for_level("info"), [
"Step: relink Device: sda1 Policy: %s "
"Partitions: 1/2" % r.policy.name,
])
self.assertEqual(r.states["state"], {'96': True, '227': False})
with open(state_file, 'rt') as f:
self.assertEqual(json.load(f), {
"part_power": PART_POWER,
"next_part_power": PART_POWER + 1,
"state": {'96': True, '227': False}})
# OK, one more try
self.logger.clear()
self.assertEqual(['227'],
r.partitions_filter("", ['96', '227', '312']))
self.assertEqual(r.states["state"], {'96': True, '227': False})
# Ack partition 227
r.hook_pre_partition(os.path.join(datadir_path, '227'))
r.hook_post_partition(os.path.join(datadir_path, '227'))
self.assertEqual(self.logger.get_lines_for_level("info"), [
"Step: relink Device: sda1 Policy: %s "
"Partitions: 2/2" % r.policy.name,
])
self.assertEqual(r.states["state"], {'96': True, '227': True})
with open(state_file, 'rt') as f:
self.assertEqual(json.load(f), {
"part_power": PART_POWER,
"next_part_power": PART_POWER + 1,
"state": {'96': True, '227': True}})
recon_progress = utils.load_recon_cache(self.recon_cache)
expected_recon_data.update(
{'devices': {
'sda1': {
'parts_done': 2,
'policies': {
str(pol.idx): {
'next_part_power': PART_POWER + 1,
'part_power': PART_POWER,
'parts_done': 2,
'start_time': mock.ANY,
'stats': {
'errors': 1,
'files': 0,
'hash_dirs': 0,
'linked': 0,
'removed': 0},
'step': 'relink',
'timestamp': mock.ANY,
'total_parts': 2}},
'start_time': mock.ANY,
'stats': {
'errors': 1,
'files': 0,
'hash_dirs': 0,
'linked': 0,
'removed': 0},
'timestamp': mock.ANY,
'total_parts': 2,
'total_time': 0}}})
self.assertEqual(recon_progress, expected_recon_data)
        # If the process restarts, it reloads the state
r.states = {
"part_power": PART_POWER,
"next_part_power": PART_POWER + 1,
"state": {},
}
r.hook_pre_device(device_path)
self.assertEqual(r.states, {
"part_power": PART_POWER,
"next_part_power": PART_POWER + 1,
"state": {'96': True, '227': True}})
os.close(r.dev_lock) # Release the lock
# Start cleanup -- note that part_power and next_part_power now match!
r.do_cleanup = True
r.part_power = PART_POWER + 1
r.states = {
"part_power": PART_POWER + 1,
"next_part_power": PART_POWER + 1,
"state": {},
}
# ...which means our state file was ignored
r.hook_pre_device(device_path)
self.assertEqual(r.states, {
"prev_part_power": PART_POWER,
"part_power": PART_POWER + 1,
"next_part_power": PART_POWER + 1,
"state": {}})
os.close(r.dev_lock) # Release the lock
self.assertEqual(['227', '96'],
r.partitions_filter("", ['96', '227', '312']))
# Ack partition 227
r.hook_pre_partition(os.path.join(datadir_path, '227'))
r.hook_post_partition(os.path.join(datadir_path, '227'))
self.assertIn("Step: cleanup Device: sda1 Policy: %s "
"Partitions: 1/2" % r.policy.name,
self.logger.get_lines_for_level("info"))
self.assertEqual(r.states["state"],
{'96': False, '227': True})
with open(state_file, 'rt') as f:
self.assertEqual(json.load(f), {
"prev_part_power": PART_POWER,
"part_power": PART_POWER + 1,
"next_part_power": PART_POWER + 1,
"state": {'96': False, '227': True}})
recon_progress = utils.load_recon_cache(self.recon_cache)
expected_recon_data.update(
{'devices': {
'sda1': {
'parts_done': 1,
'policies': {
str(pol.idx): {
'next_part_power': PART_POWER + 1,
'part_power': PART_POWER + 1,
'parts_done': 1,
'start_time': mock.ANY,
'stats': {
'errors': 0,
'files': 0,
'hash_dirs': 0,
'linked': 0,
'removed': 0},
'step': 'cleanup',
'timestamp': mock.ANY,
'total_parts': 2}},
'start_time': mock.ANY,
'stats': {
'errors': 0,
'files': 0,
'hash_dirs': 0,
'linked': 0,
'removed': 0},
'timestamp': mock.ANY,
'total_parts': 2,
'total_time': 0}}})
self.assertEqual(recon_progress, expected_recon_data)
# Restart cleanup after only part 227 was done
self.assertEqual(['96'], r.partitions_filter("", ['96', '227', '312']))
self.assertEqual(r.states["state"],
{'96': False, '227': True})
# Ack partition 96
r.hook_post_partition(os.path.join(datadir_path, '96'))
self.assertIn("Step: cleanup Device: sda1 Policy: %s "
"Partitions: 2/2" % r.policy.name,
self.logger.get_lines_for_level("info"))
self.assertEqual(r.states["state"],
{'96': True, '227': True})
with open(state_file, 'rt') as f:
self.assertEqual(json.load(f), {
"prev_part_power": PART_POWER,
"part_power": PART_POWER + 1,
"next_part_power": PART_POWER + 1,
"state": {'96': True, '227': True}})
recon_progress = utils.load_recon_cache(self.recon_cache)
expected_recon_data.update(
{'devices': {
'sda1': {
'parts_done': 2,
'policies': {
str(pol.idx): {
'next_part_power': PART_POWER + 1,
'part_power': PART_POWER + 1,
'parts_done': 2,
'start_time': mock.ANY,
'stats': {
'errors': 0,
'files': 0,
'hash_dirs': 0,
'linked': 0,
'removed': 0},
'step': 'cleanup',
'timestamp': mock.ANY,
'total_parts': 2}},
'start_time': mock.ANY,
'stats': {
'errors': 0,
'files': 0,
'hash_dirs': 0,
'linked': 0,
'removed': 0},
'timestamp': mock.ANY,
'total_parts': 2,
'total_time': 0}}})
self.assertEqual(recon_progress, expected_recon_data)
# At the end, the state is still accurate
r.states = {
"prev_part_power": PART_POWER,
"part_power": PART_POWER + 1,
"next_part_power": PART_POWER + 1,
"state": {},
}
r.hook_pre_device(device_path)
self.assertEqual(r.states["state"],
{'96': True, '227': True})
os.close(r.dev_lock) # Release the lock
# If the part_power/next_part_power tuple differs, restart from scratch
r.states = {
"part_power": PART_POWER + 1,
"next_part_power": PART_POWER + 2,
"state": {},
}
r.hook_pre_device(device_path)
self.assertEqual(r.states["state"], {})
self.assertFalse(os.path.exists(state_file))
# this will also reset the recon stats
recon_progress = utils.load_recon_cache(self.recon_cache)
expected_recon_data.update({
'devices': {
'sda1': {
'parts_done': 0,
'policies': {
str(pol.idx): {
'next_part_power': PART_POWER + 2,
'part_power': PART_POWER + 1,
'parts_done': 0,
'start_time': mock.ANY,
'stats': {
'errors': 0,
'files': 0,
'hash_dirs': 0,
'linked': 0,
'removed': 0},
'step': 'cleanup',
'timestamp': mock.ANY,
'total_parts': 0}},
'start_time': mock.ANY,
'stats': {
'errors': 0,
'files': 0,
'hash_dirs': 0,
'linked': 0,
'removed': 0},
'timestamp': mock.ANY,
'total_parts': 0,
'total_time': 0}}})
self.assertEqual(recon_progress, expected_recon_data)
os.close(r.dev_lock) # Release the lock
# If the file gets corrupted, restart from scratch
with open(state_file, 'wt') as f:
f.write('NOT JSON')
r.states = {
"part_power": PART_POWER,
"next_part_power": PART_POWER + 1,
"state": {},
}
r.hook_pre_device(device_path)
self.assertEqual(r.states["state"], {})
self.assertFalse(os.path.exists(state_file))
recon_progress = utils.load_recon_cache(self.recon_cache)
expected_recon_data.update({
'devices': {
'sda1': {
'parts_done': 0,
'policies': {
str(pol.idx): {
'next_part_power': PART_POWER + 1,
'part_power': PART_POWER,
'parts_done': 0,
'start_time': mock.ANY,
'stats': {
'errors': 0,
'files': 0,
'hash_dirs': 0,
'linked': 0,
'removed': 0},
'step': 'cleanup',
'timestamp': mock.ANY,
'total_parts': 0}},
'start_time': mock.ANY,
'stats': {
'errors': 0,
'files': 0,
'hash_dirs': 0,
'linked': 0,
'removed': 0},
'timestamp': mock.ANY,
'total_parts': 0,
'total_time': 0}}})
self.assertEqual(recon_progress, expected_recon_data)
os.close(r.dev_lock) # Release the lock
return expected_recon_data
@patch_policies(
[StoragePolicy(0, 'platinum', True),
ECStoragePolicy(
1, name='ec', is_default=False, ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2)])
def test_state_file(self):
expected_recon_data = {}
for policy in POLICIES:
# because we're specifying a device, it should itself be reset
expected_recon_data = self._test_state_file(
policy, expected_recon_data)
self.logger.clear()
def test_cleanup_relinked_ok(self):
self._common_test_cleanup()
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
self.assertTrue(os.path.isfile(self.expected_file)) # link intact
self.assertEqual([], self.logger.get_lines_for_level('warning'))
# old partition should be cleaned up
self.assertFalse(os.path.exists(self.part_dir))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 0 linked, 1 removed, 0 errors)', info_lines)
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_not_yet_relinked(self):
# force new partition to be above range of partitions visited during
# cleanup
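# (increasing the part power maps partition X to 2*X or 2*X + 1, so any
# part >= 2 ** (PART_POWER - 1) lands at or above 2 ** PART_POWER)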
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
self._common_test_cleanup(relink=False)
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
self.assertTrue(os.path.isfile(self.expected_file)) # link created
# old partition should be cleaned up
self.assertFalse(os.path.exists(self.part_dir))
self.assertEqual([], self.logger.get_lines_for_level('warning'))
self.assertIn(
'Relinking (cleanup) created link: %s to %s'
% (self.objname, self.expected_file),
self.logger.get_lines_for_level('debug'))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 1 linked, 1 removed, 0 errors)', info_lines)
# suffix should be invalidated and rehashed in new partition
hashes_invalid = os.path.join(self.next_part_dir, 'hashes.invalid')
self.assertTrue(os.path.exists(hashes_invalid))
with open(hashes_invalid, 'r') as fd:
self.assertEqual('', fd.read().strip())
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_not_yet_relinked_low(self):
# force new partition to be in the range of partitions visited during
# cleanup, but not exist until after cleanup would have visited it
self._setup_object(lambda part: part < 2 ** (PART_POWER - 1))
self._common_test_cleanup(relink=False)
self.assertFalse(os.path.isfile(self.expected_file))
self.assertFalse(os.path.exists(self.next_part_dir))
# Relinker processes partitions in reverse order; as a result, the
# "normal" rehash during cleanup won't hit this, since it doesn't
# exist yet -- but when we finish processing the old partition,
# we'll loop back around.
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
self.assertTrue(os.path.isfile(self.expected_file)) # link created
# old partition should be cleaned up
self.assertFalse(os.path.exists(self.part_dir))
self.assertEqual([], self.logger.get_lines_for_level('warning'))
self.assertIn(
'Relinking (cleanup) created link: %s to %s'
% (self.objname, self.expected_file),
self.logger.get_lines_for_level('debug'))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 1 linked, 1 removed, 0 errors)', info_lines)
# suffix should be invalidated and rehashed in new partition
hashes_invalid = os.path.join(self.next_part_dir, 'hashes.invalid')
self.assertTrue(os.path.exists(hashes_invalid))
with open(hashes_invalid, 'r') as fd:
self.assertEqual('', fd.read().strip())
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_same_object_different_inode_in_new_partition(self):
# force rehash of new partition to not happen during cleanup
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
self._common_test_cleanup(relink=False)
# new file in the new partition but different inode
os.makedirs(self.expected_dir)
with open(self.expected_file, 'w') as fd:
fd.write('same but different')
with self._mock_relinker():
res = relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
])
self.assertEqual(1, res)
self.assertTrue(os.path.isfile(self.objname))
with open(self.objname, 'r') as fd:
self.assertEqual('Hello World!', fd.read())
self.assertTrue(os.path.isfile(self.expected_file))
with open(self.expected_file, 'r') as fd:
self.assertEqual('same but different', fd.read())
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual(2, len(warning_lines), warning_lines)
self.assertIn('Error relinking (cleanup): failed to relink %s to %s'
% (self.objname, self.expected_file), warning_lines[0])
# suffix should not be invalidated in new partition
hashes_invalid = os.path.join(self.next_part_dir, 'hashes.invalid')
self.assertFalse(os.path.exists(hashes_invalid))
self.assertEqual('1 hash dirs processed (cleanup=True) '
'(1 files, 0 linked, 0 removed, 1 errors)',
warning_lines[1])
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_older_object_in_new_partition(self):
# relink of the current object failed, but there is an older version
# of the same object in the new partition
# force rehash of new partition to not happen during cleanup
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
self._common_test_cleanup(relink=False)
os.makedirs(self.expected_dir)
older_obj_file = os.path.join(
self.expected_dir,
utils.Timestamp(int(self.obj_ts) - 1).internal + '.data')
with open(older_obj_file, "wb") as fd:
fd.write(b"Hello Olde Worlde!")
write_metadata(fd, {'name': self.obj_path, 'Content-Length': '18'})
with self._mock_relinker():
res = relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
])
self.assertEqual(0, res)
# old partition should be cleaned up
self.assertFalse(os.path.exists(self.part_dir))
# which is also going to clean up the older file
self.assertFalse(os.path.isfile(older_obj_file))
self.assertTrue(os.path.isfile(self.expected_file)) # link created
self.assertIn(
'Relinking (cleanup) created link: %s to %s'
% (self.objname, self.expected_file),
self.logger.get_lines_for_level('debug'))
self.assertEqual([], self.logger.get_lines_for_level('warning'))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 1 linked, 1 removed, 0 errors)', info_lines)
# suffix should be invalidated and rehashed in new partition
hashes_invalid = os.path.join(self.next_part_dir, 'hashes.invalid')
self.assertTrue(os.path.exists(hashes_invalid))
with open(hashes_invalid, 'r') as fd:
self.assertEqual('', fd.read().strip())
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_deleted(self):
# force rehash of new partition to not happen during cleanup
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
self._common_test_cleanup()
# rehash during relink creates hashes.invalid...
hashes_invalid = os.path.join(self.next_part_dir, 'hashes.invalid')
self.assertTrue(os.path.exists(hashes_invalid))
# Pretend the object got deleted in between and there is a tombstone
# note: the tombstone would normally be at a newer timestamp but here
# we make the tombstone at the same timestamp - it is treated as the
# 'required' file in the new partition, so the .data is deleted in the
# old partition
fname_ts = self.expected_file[:-4] + "ts"
os.rename(self.expected_file, fname_ts)
self.assertTrue(os.path.isfile(fname_ts))
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
self.assertTrue(os.path.isfile(fname_ts))
# old partition should be cleaned up
self.assertFalse(os.path.exists(self.part_dir))
# suffix should not be invalidated in new partition
self.assertTrue(os.path.exists(hashes_invalid))
with open(hashes_invalid, 'r') as fd:
self.assertEqual('', fd.read().strip())
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(0 files, 0 linked, 1 removed, 0 errors)', info_lines)
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_old_part_careful_file(self):
self._common_test_cleanup()
# make some extra junk file in the part
extra_file = os.path.join(self.part_dir, 'extra')
with open(extra_file, 'w'):
pass
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
# old partition can't be cleaned up
self.assertTrue(os.path.exists(self.part_dir))
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_old_part_careful_dir(self):
self._common_test_cleanup()
# make some extra junk directory in the part
extra_dir = os.path.join(self.part_dir, 'extra')
os.mkdir(extra_dir)
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
# old partition can't be cleaned up
self.assertTrue(os.path.exists(self.part_dir))
self.assertTrue(os.path.exists(extra_dir))
def test_cleanup_old_part_replication_lock_taken(self):
# verify that relinker must take the replication lock before deleting
# it, and handles the LockTimeout when unable to take it
self._common_test_cleanup()
config = """
[DEFAULT]
swift_dir = %s
devices = %s
mount_check = false
replication_lock_timeout = 1
[object-relinker]
""" % (self.testdir, self.devices)
conf_file = os.path.join(self.testdir, 'relinker.conf')
with open(conf_file, 'w') as f:
f.write(dedent(config))
with utils.lock_path(self.part_dir, name='replication'):
# lock taken so relinker should be unable to remove the lock file
with self._mock_relinker():
self.assertEqual(0, relinker.main(['cleanup', conf_file]))
# old partition can't be cleaned up
self.assertTrue(os.path.exists(self.part_dir))
self.assertTrue(os.path.exists(
os.path.join(self.part_dir, '.lock-replication')))
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_old_part_partition_lock_taken_during_get_hashes(self):
# verify that relinker handles LockTimeouts when rehashing
self._common_test_cleanup()
config = """
[DEFAULT]
swift_dir = %s
devices = %s
mount_check = false
replication_lock_timeout = 1
[object-relinker]
""" % (self.testdir, self.devices)
conf_file = os.path.join(self.testdir, 'relinker.conf')
with open(conf_file, 'w') as f:
f.write(dedent(config))
orig_get_hashes = BaseDiskFileManager.get_hashes
def new_get_hashes(*args, **kwargs):
# lock taken so relinker should be unable to rehash
with utils.lock_path(self.part_dir):
return orig_get_hashes(*args, **kwargs)
with self._mock_relinker(), \
mock.patch('swift.common.utils.DEFAULT_LOCK_TIMEOUT', 0.1), \
mock.patch.object(BaseDiskFileManager,
'get_hashes', new_get_hashes):
self.assertEqual(0, relinker.main(['cleanup', conf_file]))
# old partition can't be cleaned up
self.assertTrue(os.path.exists(self.part_dir))
self.assertTrue(os.path.exists(
os.path.join(self.part_dir, '.lock')))
self.assertEqual([], self.logger.get_lines_for_level('error'))
self.assertEqual([], self.logger.get_lines_for_level('warning'))
def test_cleanup_old_part_lock_taken_between_get_hashes_and_rm(self):
# verify that relinker must take the partition lock before deleting
# it, and handles the LockTimeout when unable to take it
self._common_test_cleanup()
config = """
[DEFAULT]
swift_dir = %s
devices = %s
mount_check = false
replication_lock_timeout = 1
[object-relinker]
""" % (self.testdir, self.devices)
conf_file = os.path.join(self.testdir, 'relinker.conf')
with open(conf_file, 'w') as f:
f.write(dedent(config))
orig_replication_lock = BaseDiskFileManager.replication_lock
@contextmanager
def new_lock(*args, **kwargs):
# lock taken so relinker should be unable to rehash
with utils.lock_path(self.part_dir):
with orig_replication_lock(*args, **kwargs) as cm:
yield cm
with self._mock_relinker(), \
mock.patch('swift.common.utils.DEFAULT_LOCK_TIMEOUT', 0.1), \
mock.patch.object(BaseDiskFileManager,
'replication_lock', new_lock):
self.assertEqual(0, relinker.main(['cleanup', conf_file]))
# old partition can't be cleaned up
self.assertTrue(os.path.exists(self.part_dir))
self.assertTrue(os.path.exists(
os.path.join(self.part_dir, '.lock')))
self.assertEqual([], self.logger.get_lines_for_level('error'))
self.assertEqual([], self.logger.get_lines_for_level('warning'))
def test_cleanup_old_part_robust(self):
self._common_test_cleanup()
orig_get_hashes = DiskFileManager.get_hashes
calls = []
def mock_get_hashes(mgr, device, part, suffixes, policy):
orig_resp = orig_get_hashes(mgr, device, part, suffixes, policy)
if part == self.part:
expected_files = ['.lock', 'hashes.pkl', 'hashes.invalid']
self.assertEqual(set(expected_files),
set(os.listdir(self.part_dir)))
# unlink a random file, should be empty
os.unlink(os.path.join(self.part_dir, 'hashes.pkl'))
# create an ssync replication lock, too
with open(os.path.join(self.part_dir,
'.lock-replication'), 'w'):
pass
calls.append(True)
elif part == self.next_part:
# sometimes our random obj needs to rehash the next part too
pass
else:
self.fail('Unexpected call to get_hashes for %r' % part)
return orig_resp
with mock.patch.object(DiskFileManager, 'get_hashes', mock_get_hashes):
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
self.assertEqual([True], calls)
# old partition can still be cleaned up
self.assertFalse(os.path.exists(self.part_dir))
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_reapable(self):
# relink a tombstone
fname_ts = self.objname[:-4] + "ts"
os.rename(self.objname, fname_ts)
self.objname = fname_ts
self.expected_file = self.expected_file[:-4] + "ts"
self._common_test_cleanup()
self.assertTrue(os.path.exists(self.expected_file)) # sanity check
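# with time.time() mocked far into the future the tombstone is much
# older than reclaim_age, so cleanup reaps it rather than relinking it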
with self._mock_relinker(), \
mock.patch('time.time', return_value=1e10 - 1): # far future
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
self.assertEqual(self.logger.get_lines_for_level('error'), [])
self.assertEqual(self.logger.get_lines_for_level('warning'), [])
# reclaimed during relinker cleanup...
self.assertFalse(os.path.exists(self.objname))
# reclaimed during relinker relink or relinker cleanup, depending on
# which quartile the partition is in ...
self.assertFalse(os.path.exists(self.expected_file))
def test_cleanup_new_does_not_exist(self):
self._common_test_cleanup()
# Pretend the file in the new place got deleted in between relink and
# cleanup: cleanup should re-create the link
os.remove(self.expected_file)
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
self.assertTrue(os.path.isfile(self.expected_file)) # link created
# old partition should be cleaned up
self.assertFalse(os.path.exists(self.part_dir))
self.assertIn(
'Relinking (cleanup) created link: %s to %s'
% (self.objname, self.expected_file),
self.logger.get_lines_for_level('debug'))
self.assertEqual([], self.logger.get_lines_for_level('warning'))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 1 linked, 1 removed, 0 errors)', info_lines)
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_new_does_not_exist_and_relink_fails(self):
# force rehash of new partition to not happen during cleanup
self._setup_object(lambda part: part >= 2 ** (PART_POWER - 1))
self._common_test_cleanup()
# rehash during relink creates hashes.invalid...
hashes_invalid = os.path.join(self.next_part_dir, 'hashes.invalid')
self.assertTrue(os.path.exists(hashes_invalid))
# Pretend the file in the new place got deleted in between relink and
# cleanup: cleanup attempts to re-create the link but fails
os.remove(self.expected_file)
with mock.patch('swift.obj.diskfile.os.link', side_effect=OSError):
with self._mock_relinker():
self.assertEqual(1, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
self.assertFalse(os.path.isfile(self.expected_file))
self.assertTrue(os.path.isfile(self.objname)) # old file intact
self.assertEqual(self.logger.get_lines_for_level('warning'), [
'Error relinking (cleanup): failed to relink %s to %s: '
% (self.objname, self.expected_file),
'1 hash dirs processed (cleanup=True) '
'(1 files, 0 linked, 0 removed, 1 errors)',
])
# suffix should not be invalidated in new partition
self.assertTrue(os.path.exists(hashes_invalid))
with open(hashes_invalid, 'r') as fd:
self.assertEqual('', fd.read().strip())
# nor in the old partition
old_hashes_invalid = os.path.join(self.part_dir, 'hashes.invalid')
self.assertFalse(os.path.exists(old_hashes_invalid))
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_remove_fails(self):
meta_file = utils.Timestamp(int(self.obj_ts) + 1).internal + '.meta'
old_meta_path = os.path.join(self.objdir, meta_file)
new_meta_path = os.path.join(self.expected_dir, meta_file)
with open(old_meta_path, 'w') as fd:
fd.write('meta file in old partition')
self._common_test_cleanup()
calls = []
orig_remove = os.remove
def mock_remove(path, *args, **kwargs):
calls.append(path)
if len(calls) == 1:
raise OSError
return orig_remove(path)
with mock.patch('swift.obj.diskfile.os.remove', mock_remove):
with self._mock_relinker():
self.assertEqual(1, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
self.assertEqual([old_meta_path, self.objname], calls)
self.assertTrue(os.path.isfile(self.expected_file)) # new file intact
self.assertTrue(os.path.isfile(new_meta_path)) # new file intact
self.assertFalse(os.path.isfile(self.objname)) # old file removed
self.assertTrue(os.path.isfile(old_meta_path)) # meta file remove fail
self.assertEqual(self.logger.get_lines_for_level('warning'), [
'Error cleaning up %s: OSError()' % old_meta_path,
'1 hash dirs processed (cleanup=True) '
'(2 files, 0 linked, 1 removed, 1 errors)',
])
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_cleanup_two_files_need_linking(self):
meta_file = utils.Timestamp(int(self.obj_ts) + 1).internal + '.meta'
old_meta_path = os.path.join(self.objdir, meta_file)
new_meta_path = os.path.join(self.expected_dir, meta_file)
with open(old_meta_path, 'w') as fd:
fd.write('unexpected file in old partition')
self._common_test_cleanup(relink=False)
self.assertFalse(os.path.isfile(self.expected_file)) # link missing
self.assertFalse(os.path.isfile(new_meta_path)) # link missing
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
self.assertTrue(os.path.isfile(self.expected_file)) # new file created
self.assertTrue(os.path.isfile(new_meta_path)) # new file created
self.assertFalse(os.path.isfile(self.objname)) # old file removed
self.assertFalse(os.path.isfile(old_meta_path)) # meta file removed
self.assertEqual([], self.logger.get_lines_for_level('warning'))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(2 files, 2 linked, 2 removed, 0 errors)', info_lines)
self.assertEqual([], self.logger.get_lines_for_level('error'))
@patch_policies(
[ECStoragePolicy(
0, name='platinum', is_default=True, ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2)])
def test_cleanup_diskfile_error(self):
# Switch the policy type so all fragments raise DiskFileError: they
# are included in the diskfile data as 'unexpected' files and cleanup
# should include them
self._common_test_cleanup()
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
log_lines = self.logger.get_lines_for_level('warning')
# The error is logged six times:
# during _common_test_cleanup() relink: once for cleanup_ondisk_files
# in old and once for get_ondisk_files of union of files;
# during cleanup: once for cleanup_ondisk_files in old and new
# location, once for get_ondisk_files of union of files;
# during either relink or cleanup: once for the rehash of the new
# partition
self.assertEqual(6, len(log_lines),
'Expected 6 log lines, got %r' % log_lines)
for line in log_lines:
self.assertIn('Bad fragment index: None', line, log_lines)
self.assertTrue(os.path.isfile(self.expected_file)) # new file intact
# old partition should be cleaned up
self.assertFalse(os.path.exists(self.part_dir))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 0 linked, 1 removed, 0 errors)', info_lines)
self.assertEqual([], self.logger.get_lines_for_level('error'))
@patch_policies(
[ECStoragePolicy(
0, name='platinum', is_default=True, ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2)])
def test_cleanup_diskfile_error_new_file_missing(self):
self._common_test_cleanup(relink=False)
# Switch the policy type so all fragments raise DiskFileError: they
# are included in the diskfile data as 'unexpected' files and cleanup
# should include them
with self._mock_relinker():
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
warning_lines = self.logger.get_lines_for_level('warning')
# once for cleanup_ondisk_files in old, again for the get_ondisk_files
# of union of files, and one last time when the new partition gets
# rehashed at the end of processing the old one
self.assertEqual(3, len(warning_lines),
'Expected 3 log lines, got %r' % warning_lines)
for line in warning_lines:
self.assertIn('Bad fragment index: None', line, warning_lines)
self.assertIn(
'Relinking (cleanup) created link: %s to %s'
% (self.objname, self.expected_file),
self.logger.get_lines_for_level('debug'))
self.assertTrue(os.path.isfile(self.expected_file)) # new file intact
# old partition should be cleaned up
self.assertFalse(os.path.exists(self.part_dir))
info_lines = self.logger.get_lines_for_level('info')
self.assertIn('1 hash dirs processed (cleanup=True) '
'(1 files, 1 linked, 1 removed, 0 errors)', info_lines)
self.assertEqual([], self.logger.get_lines_for_level('error'))
def test_rehashing(self):
calls = []
@contextmanager
def do_mocks():
orig_invalidate = relinker.diskfile.invalidate_hash
orig_get_hashes = DiskFileManager.get_hashes
def mock_invalidate(suffix_dir):
calls.append(('invalidate', suffix_dir))
return orig_invalidate(suffix_dir)
def mock_get_hashes(self, *args):
calls.append(('get_hashes', ) + args)
return orig_get_hashes(self, *args)
with mock.patch.object(relinker.diskfile, 'invalidate_hash',
mock_invalidate), \
mock.patch.object(DiskFileManager, 'get_hashes',
mock_get_hashes):
with self._mock_relinker():
yield
with do_mocks():
self.rb.prepare_increase_partition_power()
self._save_ring()
self.assertEqual(0, relinker.main([
'relink',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
expected = [('invalidate', self.next_suffix_dir)]
if self.part >= 2 ** (PART_POWER - 1):
expected.append(('get_hashes', self.existing_device,
self.next_part, [], POLICIES[0]))
self.assertEqual(calls, expected)
# Depending on partition, there may or may not be a get_hashes here
self.rb._ring = None # Force builder to reload ring
self.rb.increase_partition_power()
self._save_ring()
self.assertEqual(0, relinker.main([
'cleanup',
'--swift-dir', self.testdir,
'--devices', self.devices,
'--skip-mount',
]))
if self.part < 2 ** (PART_POWER - 1):
expected.append(('get_hashes', self.existing_device,
self.next_part, [], POLICIES[0]))
expected.extend([
('invalidate', self.suffix_dir),
('get_hashes', self.existing_device, self.part, [],
POLICIES[0]),
])
self.assertEqual(calls, expected)
self.assertEqual([], self.logger.get_lines_for_level('error'))
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/cli/test_relinker.py |
swift-master | test/unit/cli/__init__.py |
|
# Copyright (c) 2013 Christian Schwede <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import os
import random
import re
import tempfile
import time
import unittest
import shutil
import string
import sys
import six
from eventlet.green import socket
from six import StringIO
from six.moves import urllib
from swift.cli import recon
from swift.common import utils
from swift.common.ring import builder
from swift.common.ring import utils as ring_utils
from swift.common.storage_policy import StoragePolicy, POLICIES
from test.unit import patch_policies
if six.PY3:
from eventlet.green.urllib import request as urllib2
GREEN_URLLIB_URLOPEN = 'eventlet.green.urllib.request.urlopen'
else:
from eventlet.green import urllib2
GREEN_URLLIB_URLOPEN = 'eventlet.green.urllib2.urlopen'
class TestHelpers(unittest.TestCase):
def test_seconds2timeunit(self):
self.assertEqual(recon.seconds2timeunit(10), (10, 'seconds'))
self.assertEqual(recon.seconds2timeunit(600), (10, 'minutes'))
self.assertEqual(recon.seconds2timeunit(36000), (10, 'hours'))
self.assertEqual(recon.seconds2timeunit(60 * 60 * 24 * 10),
(10, 'days'))
def test_size_suffix(self):
self.assertEqual(recon.size_suffix(5 * 10 ** 2), '500 bytes')
self.assertEqual(recon.size_suffix(5 * 10 ** 3), '5 kB')
self.assertEqual(recon.size_suffix(5 * 10 ** 6), '5 MB')
self.assertEqual(recon.size_suffix(5 * 10 ** 9), '5 GB')
self.assertEqual(recon.size_suffix(5 * 10 ** 12), '5 TB')
self.assertEqual(recon.size_suffix(5 * 10 ** 15), '5 PB')
self.assertEqual(recon.size_suffix(5 * 10 ** 18), '5 EB')
self.assertEqual(recon.size_suffix(5 * 10 ** 21), '5 ZB')
class TestScout(unittest.TestCase):
def setUp(self, *_args, **_kwargs):
self.scout_instance = recon.Scout("type", suppress_errors=True)
self.url = 'http://127.0.0.1:8080/recon/type'
self.server_type_url = 'http://127.0.0.1:8080/'
@mock.patch(GREEN_URLLIB_URLOPEN)
def test_scout_ok(self, mock_urlopen):
mock_urlopen.return_value.read = lambda: json.dumps([])
url, content, status, ts_start, ts_end = self.scout_instance.scout(
("127.0.0.1", "8080"))
self.assertEqual(url, self.url)
self.assertEqual(content, [])
self.assertEqual(status, 200)
@mock.patch(GREEN_URLLIB_URLOPEN)
def test_scout_url_error(self, mock_urlopen):
mock_urlopen.side_effect = urllib2.URLError("")
url, content, status, ts_start, ts_end = self.scout_instance.scout(
("127.0.0.1", "8080"))
self.assertIsInstance(content, urllib2.URLError)
self.assertEqual(url, self.url)
self.assertEqual(status, -1)
@mock.patch(GREEN_URLLIB_URLOPEN)
def test_scout_http_error(self, mock_urlopen):
mock_urlopen.side_effect = urllib2.HTTPError(
self.url, 404, "Internal error", None, None)
url, content, status, ts_start, ts_end = self.scout_instance.scout(
("127.0.0.1", "8080"))
self.assertEqual(url, self.url)
self.assertIsInstance(content, urllib2.HTTPError)
self.assertEqual(status, 404)
@mock.patch(GREEN_URLLIB_URLOPEN)
def test_scout_socket_timeout(self, mock_urlopen):
mock_urlopen.side_effect = socket.timeout("timeout")
url, content, status, ts_start, ts_end = self.scout_instance.scout(
("127.0.0.1", "8080"))
self.assertIsInstance(content, socket.timeout)
self.assertEqual(url, self.url)
self.assertEqual(status, -1)
@mock.patch(GREEN_URLLIB_URLOPEN)
def test_scout_server_type_ok(self, mock_urlopen):
def getheader(name):
d = {'Server': 'server-type'}
return d.get(name)
mock_urlopen.return_value.info.return_value.get = getheader
url, content, status = self.scout_instance.scout_server_type(
("127.0.0.1", "8080"))
self.assertEqual(url, self.server_type_url)
self.assertEqual(content, 'server-type')
self.assertEqual(status, 200)
@mock.patch(GREEN_URLLIB_URLOPEN)
def test_scout_server_type_url_error(self, mock_urlopen):
mock_urlopen.side_effect = urllib2.URLError("")
url, content, status = self.scout_instance.scout_server_type(
("127.0.0.1", "8080"))
self.assertIsInstance(content, urllib2.URLError)
self.assertEqual(url, self.server_type_url)
self.assertEqual(status, -1)
@mock.patch(GREEN_URLLIB_URLOPEN)
def test_scout_server_type_http_error(self, mock_urlopen):
mock_urlopen.side_effect = urllib2.HTTPError(
self.server_type_url, 404, "Internal error", None, None)
url, content, status = self.scout_instance.scout_server_type(
("127.0.0.1", "8080"))
self.assertEqual(url, self.server_type_url)
self.assertIsInstance(content, urllib2.HTTPError)
self.assertEqual(status, 404)
@mock.patch(GREEN_URLLIB_URLOPEN)
def test_scout_server_type_socket_timeout(self, mock_urlopen):
mock_urlopen.side_effect = socket.timeout("timeout")
url, content, status = self.scout_instance.scout_server_type(
("127.0.0.1", "8080"))
self.assertIsInstance(content, socket.timeout)
self.assertEqual(url, self.server_type_url)
self.assertEqual(status, -1)
@patch_policies
class TestRecon(unittest.TestCase):
def setUp(self, *_args, **_kwargs):
self.swift_conf_file = utils.SWIFT_CONF_FILE
self.recon_instance = recon.SwiftRecon()
self.swift_dir = tempfile.mkdtemp()
self.ring_name = POLICIES.legacy.ring_name
self.tmpfile_name = os.path.join(
self.swift_dir, self.ring_name + '.ring.gz')
self.ring_name2 = POLICIES[1].ring_name
self.tmpfile_name2 = os.path.join(
self.swift_dir, self.ring_name2 + '.ring.gz')
swift_conf = os.path.join(self.swift_dir, 'swift.conf')
self.policy_name = ''.join(random.sample(string.ascii_letters, 20))
swift_conf_data = '''
[swift-hash]
swift_hash_path_suffix = changeme
[storage-policy:0]
name = default
default = yes
[storage-policy:1]
name = unu
aliases = %s
''' % self.policy_name
with open(swift_conf, "wb") as sc:
sc.write(swift_conf_data.encode('utf8'))
def tearDown(self, *_args, **_kwargs):
utils.SWIFT_CONF_FILE = self.swift_conf_file
shutil.rmtree(self.swift_dir, ignore_errors=True)
def _make_object_rings(self):
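# RingBuilder args are (part_power, replicas, min_part_hours); build a
# 4-device ring for the default policy and a 2-device ring for policy-1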
ringbuilder = builder.RingBuilder(2, 3, 1)
devs = [
'r0z0-127.0.0.1:10000/sda1',
'r0z1-127.0.0.1:10001/sda1',
'r1z0-127.0.0.1:10002/sda1',
'r1z1-127.0.0.1:10003/sda1',
]
for raw_dev_str in devs:
dev = ring_utils.parse_add_value(raw_dev_str)
dev['weight'] = 1.0
ringbuilder.add_dev(dev)
ringbuilder.rebalance()
ringbuilder.get_ring().save(self.tmpfile_name)
ringbuilder = builder.RingBuilder(2, 2, 1)
devs = [
'r0z0-127.0.0.1:10000/sda1',
'r0z1-127.0.0.2:10004/sda1',
]
for raw_dev_str in devs:
dev = ring_utils.parse_add_value(raw_dev_str)
dev['weight'] = 1.0
ringbuilder.add_dev(dev)
ringbuilder.rebalance()
ringbuilder.get_ring().save(self.tmpfile_name2)
def test_gen_stats(self):
stats = self.recon_instance._gen_stats((1, 4, 10, None), 'Sample')
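# None is excluded from low/high/average/total (1 + 4 + 10 == 15 over 3
# reported values -> 5.0) but is counted via number_none / perc_none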
self.assertEqual(stats.get('name'), 'Sample')
self.assertEqual(stats.get('average'), 5.0)
self.assertEqual(stats.get('high'), 10)
self.assertEqual(stats.get('reported'), 3)
self.assertEqual(stats.get('low'), 1)
self.assertEqual(stats.get('total'), 15)
self.assertEqual(stats.get('number_none'), 1)
self.assertEqual(stats.get('perc_none'), 25.0)
def test_ptime(self):
with mock.patch('time.gmtime') as mock_gmtime:
mock_gmtime.return_value = time.struct_time(
(2013, 12, 17, 10, 0, 0, 1, 351, 0))
timestamp = self.recon_instance._ptime(1387274400)
self.assertEqual(timestamp, "2013-12-17 10:00:00")
mock_gmtime.assert_called_with(1387274400)
timestamp2 = self.recon_instance._ptime()
self.assertEqual(timestamp2, "2013-12-17 10:00:00")
mock_gmtime.assert_called_with()
def test_get_hosts(self):
self._make_object_rings()
ips = self.recon_instance.get_hosts(
None, None, self.swift_dir, [self.ring_name])
self.assertEqual(
set([('127.0.0.1', 10000), ('127.0.0.1', 10001),
('127.0.0.1', 10002), ('127.0.0.1', 10003)]), ips)
ips = self.recon_instance.get_hosts(
0, None, self.swift_dir, [self.ring_name])
self.assertEqual(
set([('127.0.0.1', 10000), ('127.0.0.1', 10001)]), ips)
ips = self.recon_instance.get_hosts(
1, None, self.swift_dir, [self.ring_name])
self.assertEqual(
set([('127.0.0.1', 10002), ('127.0.0.1', 10003)]), ips)
ips = self.recon_instance.get_hosts(
0, 0, self.swift_dir, [self.ring_name])
self.assertEqual(set([('127.0.0.1', 10000)]), ips)
ips = self.recon_instance.get_hosts(
1, 1, self.swift_dir, [self.ring_name])
self.assertEqual(set([('127.0.0.1', 10003)]), ips)
ips = self.recon_instance.get_hosts(
None, None, self.swift_dir, [self.ring_name, self.ring_name2])
self.assertEqual(
set([('127.0.0.1', 10000), ('127.0.0.1', 10001),
('127.0.0.1', 10002), ('127.0.0.1', 10003),
('127.0.0.2', 10004)]), ips)
ips = self.recon_instance.get_hosts(
0, None, self.swift_dir, [self.ring_name, self.ring_name2])
self.assertEqual(
set([('127.0.0.1', 10000), ('127.0.0.1', 10001),
('127.0.0.2', 10004)]), ips)
ips = self.recon_instance.get_hosts(
1, None, self.swift_dir, [self.ring_name, self.ring_name2])
self.assertEqual(
set([('127.0.0.1', 10002), ('127.0.0.1', 10003)]), ips)
ips = self.recon_instance.get_hosts(
0, 1, self.swift_dir, [self.ring_name, self.ring_name2])
self.assertEqual(set([('127.0.0.1', 10001),
('127.0.0.2', 10004)]), ips)
def test_get_error_ringnames(self):
# create invalid ring name files
invalid_ring_file_names = ('object.sring.gz',
'object-1.sring.gz',
'broken')
for invalid_ring in invalid_ring_file_names:
ring_path = os.path.join(self.swift_dir, invalid_ring)
with open(ring_path, 'w'):
pass
hosts = [("127.0.0.1", "8080")]
self.recon_instance.verbose = True
self.recon_instance.server_type = 'object'
stdout = StringIO()
with mock.patch('sys.stdout', new=stdout), \
mock.patch('swift.common.utils.md5'):
self.recon_instance.get_ringmd5(hosts, self.swift_dir)
output = stdout.getvalue()
self.assertNotIn('On disk ', output)
def test_get_ringmd5(self):
for server_type in ('account', 'container', 'object', 'object-1'):
ring_name = '%s.ring.gz' % server_type
ring_file = os.path.join(self.swift_dir, ring_name)
open(ring_file, 'w').close()
empty_file_hash = 'd41d8cd98f00b204e9800998ecf8427e'
bad_file_hash = '00000000000000000000000000000000'
hosts = [("127.0.0.1", "8080")]
with mock.patch('swift.cli.recon.Scout') as mock_scout:
scout_instance = mock.MagicMock()
url = 'http://%s:%s/recon/ringmd5' % hosts[0]
response = {
'/etc/swift/account.ring.gz': empty_file_hash,
'/etc/swift/container.ring.gz': empty_file_hash,
'/etc/swift/object.ring.gz': empty_file_hash,
'/etc/swift/object-1.ring.gz': empty_file_hash,
}
status = 200
scout_instance.scout.return_value = (url, response, status, 0, 0)
mock_scout.return_value = scout_instance
mock_hash = mock.MagicMock()
# Check correct account, container and object ring hashes
for server_type in ('account', 'container', 'object'):
self.recon_instance.server_type = server_type
stdout = StringIO()
with mock.patch('sys.stdout', new=stdout), \
mock.patch('swift.common.utils.md5', new=mock_hash):
mock_hash.return_value.hexdigest.return_value = \
empty_file_hash
self.recon_instance.get_ringmd5(hosts, self.swift_dir)
output = stdout.getvalue()
expected = '1/1 hosts matched'
found = False
for line in output.splitlines():
if '!!' in line:
self.fail('Unexpected Error in output: %r' % line)
if expected in line:
found = True
if not found:
self.fail('Did not find expected substring %r '
'in output:\n%s' % (expected, output))
# Check bad container ring hash
self.recon_instance.server_type = 'container'
response = {
'/etc/swift/account.ring.gz': empty_file_hash,
'/etc/swift/container.ring.gz': bad_file_hash,
'/etc/swift/object.ring.gz': empty_file_hash,
'/etc/swift/object-1.ring.gz': empty_file_hash,
}
scout_instance.scout.return_value = (url, response, status, 0, 0)
mock_scout.return_value = scout_instance
stdout = StringIO()
with mock.patch('sys.stdout', new=stdout), \
mock.patch('swift.common.utils.md5', new=mock_hash):
mock_hash.return_value.hexdigest.return_value = \
empty_file_hash
self.recon_instance.get_ringmd5(hosts, self.swift_dir)
output = stdout.getvalue()
expected = '0/1 hosts matched'
found = False
for line in output.splitlines():
if '!!' in line:
self.assertIn('doesn\'t match on disk md5sum', line)
if expected in line:
found = True
if not found:
self.fail('Did not find expected substring %r '
'in output:\n%s' % (expected, output))
# Check object ring, container mismatch should be ignored
self.recon_instance.server_type = 'object'
stdout = StringIO()
with mock.patch('sys.stdout', new=stdout), \
mock.patch('swift.common.utils.md5', new=mock_hash):
mock_hash.return_value.hexdigest.return_value = \
empty_file_hash
self.recon_instance.get_ringmd5(hosts, self.swift_dir)
output = stdout.getvalue()
expected = '1/1 hosts matched'
found = False
for line in output.splitlines():
if '!!' in line:
self.fail('Unexpected Error in output: %r' % line)
if expected in line:
found = True
if not found:
self.fail('Did not find expected substring %r '
'in output:\n%s' % (expected, output))
# Cleanup
self.recon_instance.server_type = 'object'
for ring in ('account', 'container', 'object', 'object-1'):
os.remove(os.path.join(self.swift_dir, "%s.ring.gz" % ring))
def test_quarantine_check(self):
hosts = [('127.0.0.1', 6010), ('127.0.0.1', 6020),
('127.0.0.1', 6030), ('127.0.0.1', 6040),
('127.0.0.1', 6050)]
# sample json response from http://<host>:<port>/recon/quarantined
responses = {6010: {'accounts': 0, 'containers': 0, 'objects': 1,
'policies': {'0': {'objects': 0},
'1': {'objects': 1}}},
6020: {'accounts': 1, 'containers': 1, 'objects': 3,
'policies': {'0': {'objects': 1},
'1': {'objects': 2}}},
6030: {'accounts': 2, 'containers': 2, 'objects': 5,
'policies': {'0': {'objects': 2},
'1': {'objects': 3}}},
6040: {'accounts': 3, 'containers': 3, 'objects': 7,
'policies': {'0': {'objects': 3},
'1': {'objects': 4}}},
# A server without storage policies enabled
6050: {'accounts': 0, 'containers': 0, 'objects': 4}}
# <low> <high> <avg> <total> <Failed> <no_result> <reported>
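# e.g. objects: 1 + 3 + 5 + 7 + 4 == 20 across 5 hosts -> avg 4.0; the
# per-policy rows report only 4 hosts because 6050 has no 'policies' key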
expected = {'objects_0': (0, 3, 1.5, 6, 0.0, 0, 4),
'objects_1': (1, 4, 2.5, 10, 0.0, 0, 4),
'objects': (1, 7, 4.0, 20, 0.0, 0, 5),
'accounts': (0, 3, 1.2, 6, 0.0, 0, 5),
'containers': (0, 3, 1.2, 6, 0.0, 0, 5)}
def mock_scout_quarantine(app, host):
url = 'http://%s:%s/recon/quarantined' % host
response = responses[host[1]]
status = 200
return url, response, status, 0, 0
stdout = StringIO()
with mock.patch('swift.cli.recon.Scout.scout',
mock_scout_quarantine), \
mock.patch('sys.stdout', new=stdout):
self.recon_instance.quarantine_check(hosts)
output = stdout.getvalue()
r = re.compile(r"\[quarantined_(.*)\](.*)")
for line in output.splitlines():
m = r.match(line)
if m:
ex = expected.pop(m.group(1))
self.assertEqual(m.group(2),
" low: %s, high: %s, avg: %s, total: %s,"
" Failed: %s%%, no_result: %s, reported: %s"
% ex)
self.assertFalse(expected)
def test_async_check(self):
hosts = [('127.0.0.1', 6011), ('127.0.0.1', 6021),
('127.0.0.1', 6031), ('127.0.0.1', 6041)]
# sample json response from http://<host>:<port>/recon/async
responses = {6011: {'async_pending': 15},
6021: {'async_pending': 0},
6031: {'async_pending': 257},
6041: {'async_pending': 56}}
# <low> <high> <avg> <total> <Failed> <no_result> <reported>
expected = (0, 257, 82.0, 328, 0.0, 0, 4)
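# 15 + 0 + 257 + 56 == 328 async pendings across 4 hosts -> avg 82.0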
def mock_scout_async(app, host):
url = 'http://%s:%s/recon/async' % host
response = responses[host[1]]
status = 200
return url, response, status, 0, 0
stdout = StringIO()
with mock.patch('swift.cli.recon.Scout.scout',
mock_scout_async), \
mock.patch('sys.stdout', new=stdout):
self.recon_instance.async_check(hosts)
output = stdout.getvalue()
r = re.compile(r"\[async_pending(.*)\](.*)")
lines = output.splitlines()
self.assertTrue(lines)
for line in lines:
m = r.match(line)
if m:
self.assertEqual(m.group(2),
" low: %s, high: %s, avg: %s, total: %s,"
" Failed: %s%%, no_result: %s, reported: %s"
% expected)
break
else:
self.fail('The expected line is not found')
def test_umount_check(self):
hosts = [('127.0.0.1', 6010), ('127.0.0.1', 6020),
('127.0.0.1', 6030), ('127.0.0.1', 6040)]
# sample json response from http://<host>:<port>/recon/unmounted
responses = {6010: [{'device': 'sdb1', 'mounted': False}],
6020: [{'device': 'sdb2', 'mounted': False}],
6030: [{'device': 'sdb3', 'mounted': False}],
6040: [{'device': 'sdb4', 'mounted': 'bad'}]}
expected = ['Not mounted: sdb1 on 127.0.0.1:6010',
'Not mounted: sdb2 on 127.0.0.1:6020',
'Not mounted: sdb3 on 127.0.0.1:6030',
'Device errors: sdb4 on 127.0.0.1:6040']
def mock_scout_umount(app, host):
url = 'http://%s:%s/recon/unmounted' % host
response = responses[host[1]]
status = 200
return url, response, status, 0, 0
stdout = StringIO()
with mock.patch('swift.cli.recon.Scout.scout',
mock_scout_umount), \
mock.patch('sys.stdout', new=stdout):
self.recon_instance.umount_check(hosts)
output = stdout.getvalue()
r = re.compile(r"^Not mounted:|Device errors: .*")
lines = output.splitlines()
self.assertTrue(lines)
for line in lines:
m = r.match(line)
if m:
self.assertIn(line, expected)
expected.remove(line)
self.assertFalse(expected)
def test_drive_audit_check(self):
hosts = [('127.0.0.1', 6010), ('127.0.0.1', 6020),
('127.0.0.1', 6030), ('127.0.0.1', 6040)]
# sample json response from http://<host>:<port>/recon/driveaudit
responses = {6010: {'drive_audit_errors': 15},
6020: {'drive_audit_errors': 0},
6030: {'drive_audit_errors': 257},
6040: {'drive_audit_errors': 56}}
# <low> <high> <avg> <total> <Failed> <no_result> <reported>
expected = (0, 257, 82.0, 328, 0.0, 0, 4)
def mock_scout_driveaudit(app, host):
url = 'http://%s:%s/recon/driveaudit' % host
response = responses[host[1]]
status = 200
return url, response, status, 0, 0
stdout = StringIO()
with mock.patch('swift.cli.recon.Scout.scout',
mock_scout_driveaudit), \
mock.patch('sys.stdout', new=stdout):
self.recon_instance.driveaudit_check(hosts)
output = stdout.getvalue()
r = re.compile(r"\[drive_audit_errors(.*)\](.*)")
lines = output.splitlines()
self.assertTrue(lines)
for line in lines:
m = r.match(line)
if m:
self.assertEqual(m.group(2),
" low: %s, high: %s, avg: %s, total: %s,"
" Failed: %s%%, no_result: %s, reported: %s"
% expected)
def test_get_ring_names(self):
self.recon_instance.server_type = 'not-object'
self.assertEqual(self.recon_instance._get_ring_names(), ['not-object'])
self.recon_instance.server_type = 'object'
with patch_policies([StoragePolicy(0, 'zero', is_default=True)]):
self.assertEqual(self.recon_instance._get_ring_names(),
['object'])
with patch_policies([StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one')]):
self.assertEqual(self.recon_instance._get_ring_names(),
['object', 'object-1'])
self.assertEqual(self.recon_instance._get_ring_names('0'),
['object'])
self.assertEqual(self.recon_instance._get_ring_names('zero'),
['object'])
self.assertEqual(self.recon_instance._get_ring_names('1'),
['object-1'])
self.assertEqual(self.recon_instance._get_ring_names('one'),
['object-1'])
self.assertEqual(self.recon_instance._get_ring_names('3'), [])
self.assertEqual(self.recon_instance._get_ring_names('wrong'),
[])
def test_main_object_hosts_default_all_policies(self):
self._make_object_rings()
discovered_hosts = set()
def server_type_check(hosts):
for h in hosts:
discovered_hosts.add(h)
self.recon_instance.server_type_check = server_type_check
with mock.patch.object(sys, 'argv', [
"prog", "object", "--swiftdir=%s" % self.swift_dir,
"--validate-servers"]):
self.recon_instance.main()
expected = set([
('127.0.0.1', 10000),
('127.0.0.1', 10001),
('127.0.0.1', 10002),
('127.0.0.1', 10003),
('127.0.0.2', 10004),
])
self.assertEqual(expected, discovered_hosts)
def _test_main_object_hosts_policy_name(self, policy_name='unu'):
self._make_object_rings()
discovered_hosts = set()
def server_type_check(hosts):
for h in hosts:
discovered_hosts.add(h)
self.recon_instance.server_type_check = server_type_check
with mock.patch.object(sys, 'argv', [
"prog", "object", "--swiftdir=%s" % self.swift_dir,
"--validate-servers", '--policy', policy_name]):
self.recon_instance.main()
expected = set([
('127.0.0.1', 10000),
('127.0.0.2', 10004),
])
self.assertEqual(expected, discovered_hosts)
def test_main_object_hosts_default_unu(self):
self._test_main_object_hosts_policy_name()
def test_main_object_hosts_default_alias(self):
self._test_main_object_hosts_policy_name(self.policy_name)
def test_main_object_hosts_default_invalid(self):
self._make_object_rings()
stdout = StringIO()
with mock.patch.object(sys, 'argv', [
"prog", "object", "--swiftdir=%s" % self.swift_dir,
"--validate-servers", '--policy=invalid']),\
mock.patch('sys.stdout', stdout):
self.assertRaises(SystemExit, recon.main)
self.assertIn('Invalid Storage Policy', stdout.getvalue())
def test_calculate_least_and_most_recent(self):
now = 1517894596
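# 1517894596 is 2018-02-06 05:23:16 UTC; the expected strings below are
# rendered relative to this instant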
def test_least_most(data, expected):
stdout = StringIO()
with mock.patch('sys.stdout', new=stdout), \
mock.patch('time.time', return_value=now):
self.recon_instance._calculate_least_and_most_recent(data)
self.assertEqual(stdout.getvalue(), expected)
# first the empty set
test_least_most([], '')
expected = 'Oldest completion was NEVER by my.url.\n'
test_least_most([('http://my.url/is/awesome', 0)], expected)
expected = (
'Oldest completion was 2018-02-06 05:23:11 (5 seconds ago) '
'by my.url.\n'
'Most recent completion was 2018-02-06 05:23:11 (5 seconds ago) '
'by my.url.\n')
data = [('http://my.url/is/awesome', now - 5)]
test_least_most(data, expected)
expected = (
'Oldest completion was 2018-02-06 05:06:36 (16 minutes ago) '
'by a.diff.url.\n'
'Most recent completion was 2018-02-06 05:23:11 (5 seconds ago) '
'by my.url.\n')
data.append(('http://a.diff.url/not/as/awesome', now - 1000))
test_least_most(data, expected)
# now throw larger sets at it
for extra in (5, 10, 40, 100):
data.extend([
('http://extra.%d.url/blah' % (extra + r),
now - random.randint(6, 999)) for r in range(extra)])
random.shuffle(data)
test_least_most(data, expected)
class TestReconCommands(unittest.TestCase):
def setUp(self):
self.recon = recon.SwiftRecon()
self.hosts = set([('127.0.0.1', 10000)])
def mock_responses(self, resps):
def fake_urlopen(url, timeout):
scheme, netloc, path, _, _, _ = urllib.parse.urlparse(url)
self.assertEqual(scheme, 'http') # can't handle anything else
self.assertTrue(path.startswith('/recon/'))
if ':' in netloc:
host, port = netloc.split(':', 1)
port = int(port)
else:
host = netloc
port = 80
response_body = resps[(host, port, path[7:])]
resp = mock.MagicMock()
resp.read = mock.MagicMock(side_effect=[
response_body if six.PY2 else response_body.encode('utf8')])
return resp
return mock.patch(GREEN_URLLIB_URLOPEN, fake_urlopen)
def test_server_type_check(self):
hosts = [('127.0.0.1', 6010), ('127.0.0.1', 6011),
('127.0.0.1', 6012)]
# sample json response from http://<host>:<port>/
responses = {6010: 'object-server', 6011: 'container-server',
6012: 'account-server'}
def mock_scout_server_type(app, host):
url = 'http://%s:%s/' % (host[0], host[1])
response = responses[host[1]]
status = 200
return url, response, status
stdout = StringIO()
res_object = 'Invalid: http://127.0.0.1:6010/ is object-server'
res_container = 'Invalid: http://127.0.0.1:6011/ is container-server'
res_account = 'Invalid: http://127.0.0.1:6012/ is account-server'
valid = "1/1 hosts ok, 0 error[s] while checking hosts."
# Test for object server type - default
with mock.patch('swift.cli.recon.Scout.scout_server_type',
mock_scout_server_type), \
mock.patch('sys.stdout', new=stdout):
self.recon.server_type_check(hosts)
output = stdout.getvalue()
self.assertIn(res_container, output.splitlines())
self.assertIn(res_account, output.splitlines())
stdout.truncate(0)
# Test ok for object server type - default
with mock.patch('swift.cli.recon.Scout.scout_server_type',
mock_scout_server_type), \
mock.patch('sys.stdout', new=stdout):
self.recon.server_type_check([hosts[0]])
output = stdout.getvalue()
self.assertIn(valid, output.splitlines())
stdout.truncate(0)
# Test for account server type
with mock.patch('swift.cli.recon.Scout.scout_server_type',
mock_scout_server_type), \
mock.patch('sys.stdout', new=stdout):
self.recon.server_type = 'account'
self.recon.server_type_check(hosts)
output = stdout.getvalue()
self.assertIn(res_container, output.splitlines())
self.assertIn(res_object, output.splitlines())
stdout.truncate(0)
# Test ok for account server type
with mock.patch('swift.cli.recon.Scout.scout_server_type',
mock_scout_server_type), \
mock.patch('sys.stdout', new=stdout):
self.recon.server_type = 'account'
self.recon.server_type_check([hosts[2]])
output = stdout.getvalue()
self.assertIn(valid, output.splitlines())
stdout.truncate(0)
# Test for container server type
with mock.patch('swift.cli.recon.Scout.scout_server_type',
mock_scout_server_type), \
mock.patch('sys.stdout', new=stdout):
self.recon.server_type = 'container'
self.recon.server_type_check(hosts)
output = stdout.getvalue()
self.assertIn(res_account, output.splitlines())
self.assertIn(res_object, output.splitlines())
stdout.truncate(0)
# Test ok for container server type
with mock.patch('swift.cli.recon.Scout.scout_server_type',
mock_scout_server_type), \
mock.patch('sys.stdout', new=stdout):
self.recon.server_type = 'container'
self.recon.server_type_check([hosts[1]])
output = stdout.getvalue()
self.assertIn(valid, output.splitlines())
def test_get_swiftconfmd5(self):
hosts = set([('10.1.1.1', 10000),
('10.2.2.2', 10000)])
cksum = '729cf900f2876dead617d088ece7fe8c'
responses = {
('10.1.1.1', 10000, 'swiftconfmd5'):
json.dumps({'/etc/swift/swift.conf': cksum}),
('10.2.2.2', 10000, 'swiftconfmd5'):
json.dumps({'/etc/swift/swift.conf': cksum})}
printed = []
with self.mock_responses(responses):
with mock.patch('swift.cli.recon.md5_hash_for_file',
lambda _: cksum):
self.recon.get_swiftconfmd5(hosts, printfn=printed.append)
output = '\n'.join(printed) + '\n'
self.assertIn("2/2 hosts matched", output)
def test_get_swiftconfmd5_mismatch(self):
hosts = set([('10.1.1.1', 10000),
('10.2.2.2', 10000)])
cksum = '29d5912b1fcfcc1066a7f51412769c1d'
responses = {
('10.1.1.1', 10000, 'swiftconfmd5'):
json.dumps({'/etc/swift/swift.conf': cksum}),
('10.2.2.2', 10000, 'swiftconfmd5'):
json.dumps({'/etc/swift/swift.conf': 'bogus'})}
printed = []
with self.mock_responses(responses):
with mock.patch('swift.cli.recon.md5_hash_for_file',
lambda _: cksum):
self.recon.get_swiftconfmd5(hosts, printfn=printed.append)
output = '\n'.join(printed) + '\n'
self.assertIn("1/2 hosts matched", output)
self.assertIn("http://10.2.2.2:10000/recon/swiftconfmd5 (bogus) "
"doesn't match on disk md5sum", output)
def test_object_auditor_check(self):
# Recon middleware response from an object server
def dummy_request(*args, **kwargs):
values = {
'passes': 0, 'errors': 0, 'audit_time': 0,
'start_time': 0, 'quarantined': 0, 'bytes_processed': 0}
return [('http://127.0.0.1:6010/recon/auditor/object', {
'object_auditor_stats_ALL': values,
'object_auditor_stats_ZBF': values,
}, 200, 0, 0)]
response = {}
def catch_print(computed):
response[computed.get('name')] = computed
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
cli._print_stats = catch_print
cli.object_auditor_check([('127.0.0.1', 6010)])
# Now check that output contains all keys and names
keys = ['average', 'number_none', 'high',
'reported', 'low', 'total', 'perc_none']
names = [
'ALL_audit_time_last_path',
'ALL_quarantined_last_path',
'ALL_errors_last_path',
'ALL_passes_last_path',
'ALL_bytes_processed_last_path',
'ZBF_audit_time_last_path',
'ZBF_quarantined_last_path',
'ZBF_errors_last_path',
'ZBF_bytes_processed_last_path'
]
for name in names:
computed = response.get(name)
self.assertTrue(computed)
for key in keys:
self.assertIn(key, computed)
def test_disk_usage(self):
def dummy_request(*args, **kwargs):
return [('http://127.0.0.1:6010/recon/diskusage', [
{"device": "sdb1", "mounted": True,
"avail": 10, "used": 90, "size": 100},
{"device": "sdc1", "mounted": True,
"avail": 15, "used": 85, "size": 100},
{"device": "sdd1", "mounted": True,
"avail": 15, "used": 85, "size": 100}],
200,
0,
0)]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call('Distribution Graph:'),
mock.call(' 85% 2 **********************************' +
'***********************************'),
mock.call(' 90% 1 **********************************'),
mock.call('Disk usage: space used: 260 of 300'),
mock.call('Disk usage: space free: 40 of 300'),
mock.call('Disk usage: lowest: 85.0%, ' +
'highest: 90.0%%, avg: %s' %
('86.6666666667%' if six.PY2 else
'86.66666666666667%')),
mock.call('=' * 79),
]
with mock.patch('six.moves.builtins.print') as mock_print:
cli.disk_usage([('127.0.0.1', 6010)])
mock_print.assert_has_calls(default_calls)
with mock.patch('six.moves.builtins.print') as mock_print:
expected_calls = default_calls + [
mock.call('LOWEST 5'),
mock.call('85.00% 127.0.0.1 sdc1'),
mock.call('85.00% 127.0.0.1 sdd1'),
mock.call('90.00% 127.0.0.1 sdb1')
]
cli.disk_usage([('127.0.0.1', 6010)], 0, 5)
mock_print.assert_has_calls(expected_calls)
with mock.patch('six.moves.builtins.print') as mock_print:
expected_calls = default_calls + [
mock.call('TOP 5'),
mock.call('90.00% 127.0.0.1 sdb1'),
mock.call('85.00% 127.0.0.1 sdc1'),
mock.call('85.00% 127.0.0.1 sdd1')
]
cli.disk_usage([('127.0.0.1', 6010)], 5, 0)
mock_print.assert_has_calls(expected_calls)
@mock.patch('six.moves.builtins.print')
@mock.patch('time.time')
def test_replication_check(self, mock_now, mock_print):
now = 1430000000.0
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6011/recon/replication/container',
{"replication_last": now,
"replication_stats": {
"no_change": 2, "rsync": 0, "success": 3, "failure": 1,
"attempted": 0, "ts_repl": 0, "remove": 0,
"remote_merge": 0, "diff_capped": 0, "start": now,
"hashmatch": 0, "diff": 0, "empty": 0},
"replication_time": 42},
200,
0,
0),
('http://127.0.0.1:6021/recon/replication/container',
{"replication_last": now,
"replication_stats": {
"no_change": 0, "rsync": 0, "success": 1, "failure": 0,
"attempted": 0, "ts_repl": 0, "remove": 0,
"remote_merge": 0, "diff_capped": 0, "start": now,
"hashmatch": 0, "diff": 0, "empty": 0},
"replication_time": 23},
200,
0,
0),
]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call('[replication_failure] low: 0, high: 1, avg: 0.5, ' +
'total: 1, Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('[replication_success] low: 1, high: 3, avg: 2.0, ' +
'total: 4, Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('[replication_time] low: 23, high: 42, avg: 32.5, ' +
'total: 65, Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('[replication_attempted] low: 0, high: 0, avg: 0.0, ' +
'total: 0, Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('Oldest completion was 2015-04-25 22:13:20 ' +
'(42 seconds ago) by 127.0.0.1:6011.'),
mock.call('Most recent completion was 2015-04-25 22:13:20 ' +
'(42 seconds ago) by 127.0.0.1:6011.'),
]
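        # Freeze the clock 42 seconds after the reported replication_last
        # so the '(42 seconds ago)' output is deterministic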
mock_now.return_value = now + 42
cli.replication_check([('127.0.0.1', 6011), ('127.0.0.1', 6021)])
# We need any_order=True because the order of calls depends on the dict
# that is returned from the recon middleware, thus can't rely on it
mock_print.assert_has_calls(default_calls, any_order=True)
@mock.patch('six.moves.builtins.print')
@mock.patch('time.time')
def test_sharding_check(self, mock_now, mock_print):
now = 1430000000.0
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6011/recon/replication/container',
{"sharding_last": now - 50,
"sharding_stats": {
"attempted": 0, "deferred": 0, "diff": 0,
"diff_capped": 0, "empty": 0, "failure": 0,
"hashmatch": 0, "no_change": 0, "remote_merge": 0,
"remove": 0, "rsync": 0,
"sharding": {
"audit_root": {
"attempted": 0, "failure": 0, "success": 0},
"audit_shard": {
"attempted": 0, "failure": 0, "success": 0},
"cleaved": {
"attempted": 0, "failure": 0, "max_time": 0,
"min_time": 0, "success": 0},
"created": {
"attempted": 0, "failure": 0, "success": 0},
"misplaced": {
"attempted": 0, "failure": 0, "found": 0,
"placed": 0, "success": 0, "unplaced": 0},
"scanned": {
"attempted": 0, "failure": 0, "found": 0,
"max_time": 0, "min_time": 0, "success": 0},
"sharding_candidates": {
"found": 0,
"top": []},
"shrinking_candidates": {
"found": 0,
"top": []},
"visited": {
"attempted": 0, "completed": 0, "failure": 0,
"skipped": 1381, "success": 0}},
"start": now - 80,
"success": 0, "ts_repl": 0},
"sharding_time": 27.6},
200,
0,
0),
('http://127.0.0.1:6021/recon/sharding',
{"sharding_last": now - 50,
"sharding_stats": {
"attempted": 0, "deferred": 0, "diff": 0,
"diff_capped": 0, "empty": 0, "failure": 0,
"hashmatch": 0, "no_change": 0, "remote_merge": 0,
"remove": 0, "rsync": 0,
"sharding": {
"audit_root": {
"attempted": 0, "failure": 0, "success": 0},
"audit_shard": {
"attempted": 0, "failure": 0, "success": 0},
"cleaved": {
"attempted": 0, "failure": 0, "max_time": 0,
"min_time": 0, "success": 0},
"created": {
"attempted": 0, "failure": 0, "success": 0},
"misplaced": {
"attempted": 0, "failure": 0, "found": 0,
"placed": 0, "success": 0, "unplaced": 0},
"scanned": {
"attempted": 0, "failure": 0, "found": 0,
"max_time": 0, "min_time": 0, "success": 0},
"sharding_candidates": {
"found": 0,
"top": []},
"shrinking_candidates": {
"found": 0,
"top": []},
"visited": {
"attempted": 0, "completed": 0, "failure": 0,
"skipped": 1381, "success": 0}},
"start": now - 80,
"success": 0, "ts_repl": 0},
"sharding_time": 27.6},
200,
0,
0),
]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
# All totals are zero in our test set above. Maybe do better later.
default_calls = [
mock.call('[sharding_time] low: 27, high: 27, avg: 27.6, ' +
'total: 55, Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('[attempted] low: 0, high: 0, avg: 0.0, ' +
'total: 0, Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('[failure] low: 0, high: 0, avg: 0.0, ' +
'total: 0, Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('[success] low: 0, high: 0, avg: 0.0, ' +
'total: 0, Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('Oldest completion was 2015-04-25 22:12:30 ' +
'(1 minutes ago) by 127.0.0.1:6011.'),
mock.call('Most recent completion was 2015-04-25 22:12:30 ' +
'(1 minutes ago) by 127.0.0.1:6011.'),
]
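        # sharding_last is now - 50 and the clock is frozen at now + 48,
        # so the last completion is 98 seconds (about one minute) ago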
mock_now.return_value = now + 48
cli.sharding_check([('127.0.0.1', 6011), ('127.0.0.1', 6021)])
mock_print.assert_has_calls(default_calls, any_order=True)
    @mock.patch('six.moves.builtins.print')
    @mock.patch('time.time')
def test_reconstruction_check(self, mock_now, mock_print):
now = 1430000000.0
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6011/recon/reconstruction',
{"object_reconstruction_last": now,
"object_reconstruction_time": 42},
200, 0, 0),
('http://127.0.0.1:6021/recon/reconstruction',
{"object_reconstruction_last": now,
"object_reconstruction_time": 23},
200, 0, 0)]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call('[object_reconstruction_time] low: 23, high: 42, '
'avg: 32.5, total: 65, Failed: 0.0%, no_result: 0, '
'reported: 2'),
mock.call('Oldest completion was 2015-04-25 22:13:20 ' +
'(42 seconds ago) by 127.0.0.1:6011.'),
mock.call('Most recent completion was 2015-04-25 22:13:20 ' +
'(42 seconds ago) by 127.0.0.1:6011.'),
]
mock_now.return_value = now + 42
cli.reconstruction_check([('127.0.0.1', 6011), ('127.0.0.1', 6021)])
# We need any_order=True because the order of calls depends on the dict
# that is returned from the recon middleware, thus can't rely on it
mock_print.assert_has_calls(default_calls, any_order=True)
@mock.patch('six.moves.builtins.print')
@mock.patch('time.time')
def test_load_check(self, mock_now, mock_print):
now = 1430000000.0
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6010/recon/load',
{"1m": 0.2, "5m": 0.4, "15m": 0.25,
"processes": 10000, "tasks": "1/128"},
200, 0, 0),
('http://127.0.0.1:6020/recon/load',
{"1m": 0.4, "5m": 0.8, "15m": 0.75,
"processes": 9000, "tasks": "1/200"},
200, 0, 0)]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
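        # Expected averages span both hosts, e.g. (0.2 + 0.4) / 2 = 0.3
        # for the 1m load average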
default_calls = [
mock.call('[5m_load_avg] low: 0, high: 0, avg: 0.6, total: 1, ' +
'Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('[15m_load_avg] low: 0, high: 0, avg: 0.5, total: 1, ' +
'Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('[1m_load_avg] low: 0, high: 0, avg: 0.3, total: 0, ' +
'Failed: 0.0%, no_result: 0, reported: 2'),
]
mock_now.return_value = now + 42
cli.load_check([('127.0.0.1', 6010), ('127.0.0.1', 6020)])
# We need any_order=True because the order of calls depends on the dict
# that is returned from the recon middleware, thus can't rely on it
mock_print.assert_has_calls(default_calls, any_order=True)
@mock.patch('six.moves.builtins.print')
@mock.patch('time.time')
def test_time_check(self, mock_now, mock_print):
now = 1430000000.0
mock_now.return_value = now
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6010/recon/time',
now,
200,
now - 0.5,
now + 0.5),
('http://127.0.0.1:6020/recon/time',
now,
200,
now,
now),
]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call('2/2 hosts matched, 0 error[s] while checking hosts.')
]
cli.time_check([('127.0.0.1', 6010), ('127.0.0.1', 6020)])
# We need any_order=True because the order of calls depends on the dict
# that is returned from the recon middleware, thus can't rely on it
mock_print.assert_has_calls(default_calls, any_order=True)
@mock.patch('six.moves.builtins.print')
@mock.patch('time.time')
def test_time_check_mismatch(self, mock_now, mock_print):
now = 1430000000.0
mock_now.return_value = now
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6010/recon/time',
now,
200,
now + 0.5,
now + 1.3),
('http://127.0.0.1:6020/recon/time',
now,
200,
now,
now),
]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call("!! http://127.0.0.1:6010/recon/time current time is "
"2015-04-25 22:13:21, but remote is "
"2015-04-25 22:13:20, differs by 1.3000 sec"),
mock.call('1/2 hosts matched, 0 error[s] while checking hosts.'),
]
cli.time_check([('127.0.0.1', 6010), ('127.0.0.1', 6020)])
# We need any_order=True because the order of calls depends on the dict
# that is returned from the recon middleware, thus can't rely on it
mock_print.assert_has_calls(default_calls, any_order=True)
@mock.patch('six.moves.builtins.print')
@mock.patch('time.time')
def test_time_check_jitter(self, mock_now, mock_print):
now = 1430000000.0
mock_now.return_value = now
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6010/recon/time',
now - 2,
200,
now,
now + 3),
('http://127.0.0.1:6020/recon/time',
now + 2,
200,
now - 3,
now),
]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call('2/2 hosts matched, 0 error[s] while checking hosts.')
]
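        # With the 3 second jitter allowance passed below, the small clock
        # offsets are within tolerance and both hosts are expected to match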
cli.time_check([('127.0.0.1', 6010), ('127.0.0.1', 6020)], 3)
# We need any_order=True because the order of calls depends on the dict
# that is returned from the recon middleware, thus can't rely on it
mock_print.assert_has_calls(default_calls, any_order=True)
@mock.patch('six.moves.builtins.print')
def test_version_check(self, mock_print):
version = "2.7.1.dev144"
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6010/recon/version',
{'version': version},
200,
0,
0),
('http://127.0.0.1:6020/recon/version',
{'version': version},
200,
0,
0),
]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call("Versions matched (%s), "
"0 error[s] while checking hosts." % version)
]
cli.version_check([('127.0.0.1', 6010), ('127.0.0.1', 6020)])
# We need any_order=True because the order of calls depends on the dict
# that is returned from the recon middleware, thus can't rely on it
mock_print.assert_has_calls(default_calls, any_order=True)
@mock.patch('six.moves.builtins.print')
@mock.patch('time.time')
def test_time_check_jitter_mismatch(self, mock_now, mock_print):
now = 1430000000.0
mock_now.return_value = now
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6010/recon/time',
now - 4,
200,
now,
now + 2),
('http://127.0.0.1:6020/recon/time',
now + 4,
200,
now - 2,
now),
]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call("!! http://127.0.0.1:6010/recon/time current time is "
"2015-04-25 22:13:22, but remote is "
"2015-04-25 22:13:16, differs by 6.0000 sec"),
mock.call("!! http://127.0.0.1:6020/recon/time current time is "
"2015-04-25 22:13:20, but remote is "
"2015-04-25 22:13:24, differs by 4.0000 sec"),
mock.call('0/2 hosts matched, 0 error[s] while checking hosts.'),
]
cli.time_check([('127.0.0.1', 6010), ('127.0.0.1', 6020)], 3)
# We need any_order=True because the order of calls depends on the dict
# that is returned from the recon middleware, thus can't rely on it
mock_print.assert_has_calls(default_calls, any_order=True)
@mock.patch('six.moves.builtins.print')
def test_version_check_differs(self, mock_print):
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6010/recon/version',
{'version': "2.7.1.dev144"},
200,
0,
0),
('http://127.0.0.1:6020/recon/version',
{'version': "2.7.1.dev145"},
200,
0,
0),
]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call("Versions not matched (2.7.1.dev144, 2.7.1.dev145), "
"0 error[s] while checking hosts.")
]
cli.version_check([('127.0.0.1', 6010), ('127.0.0.1', 6020)])
# We need any_order=True because the order of calls depends on the dict
# that is returned from the recon middleware, thus can't rely on it
mock_print.assert_has_calls(default_calls, any_order=True)
@mock.patch('six.moves.builtins.print')
@mock.patch('swift.cli.recon.SwiftRecon.get_hosts')
def test_multiple_server_types(self, mock_get_hosts, mock_print):
mock_get_hosts.return_value = set([('127.0.0.1', 10000)])
self.recon.object_auditor_check = mock.MagicMock()
self.recon.auditor_check = mock.MagicMock()
with mock.patch.object(
sys, 'argv',
["prog", "account", "container", "object", "--auditor"]):
self.recon.main()
expected_calls = [
mock.call("--> Starting reconnaissance on 1 hosts (account)"),
mock.call("--> Starting reconnaissance on 1 hosts (container)"),
mock.call("--> Starting reconnaissance on 1 hosts (object)"),
]
mock_print.assert_has_calls(expected_calls, any_order=True)
expected = mock.call(set([('127.0.0.1', 10000)]))
self.recon.object_auditor_check.assert_has_calls([expected])
# Two calls expected - one account, one container
self.recon.auditor_check.assert_has_calls([expected, expected])
| swift-master | test/unit/cli/test_recon.py |
# Copyright (c) 2014 Christian Schwede <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import itertools
import logging
import mock
import os
import re
import six
import tempfile
import unittest
import uuid
import shlex
import shutil
import time
from swift.cli import ringbuilder
from swift.cli.ringbuilder import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR
from swift.common import exceptions
from swift.common.ring import RingBuilder
from swift.common.ring.composite_builder import CompositeRingBuilder
from test.unit import Timeout, write_stub_builder
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
class RunSwiftRingBuilderMixin(object):
def run_srb(self, *argv, **kwargs):
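        # Run swift-ring-builder's main() against self.tempfile with the
        # given arguments, capturing stdout/stderr; fail on an unexpected
        # exit status and return the captured (stdout, stderr) pair.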
if len(argv) == 1 and isinstance(argv[0], six.string_types):
# convert a single string to a list
argv = shlex.split(argv[0])
mock_stdout = six.StringIO()
mock_stderr = six.StringIO()
if 'exp_results' in kwargs:
exp_results = kwargs['exp_results']
else:
exp_results = None
srb_args = ["", self.tempfile] + [str(s) for s in argv]
try:
with mock.patch("sys.stdout", mock_stdout):
with mock.patch("sys.stderr", mock_stderr):
ringbuilder.main(srb_args)
except SystemExit as err:
valid_exit_codes = None
if exp_results is not None and 'valid_exit_codes' in exp_results:
valid_exit_codes = exp_results['valid_exit_codes']
else:
valid_exit_codes = (0, 1) # (success, warning)
if err.code not in valid_exit_codes:
msg = 'Unexpected exit status %s\n' % err.code
msg += 'STDOUT:\n%s\nSTDERR:\n%s\n' % (
mock_stdout.getvalue(), mock_stderr.getvalue())
self.fail(msg)
return (mock_stdout.getvalue(), mock_stderr.getvalue())
class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
def __init__(self, *args, **kwargs):
super(TestCommands, self).__init__(*args, **kwargs)
# List of search values for various actions
# These should all match the first device in the sample ring
# (see below) but not the second device
self.search_values = ["d0", "/sda1", "r0", "z0", "z0-127.0.0.1",
"127.0.0.1", "z0:6200", ":6200", "R127.0.0.1",
"127.0.0.1R127.0.0.1", "R:6200",
"_some meta data"]
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
tmpf = tempfile.NamedTemporaryFile(dir=self.tmpdir)
self.tempfile = self.tmpfile = tmpf.name
def tearDown(self):
try:
shutil.rmtree(self.tmpdir, True)
except OSError:
pass
def assertOutputStub(self, output, ext='stub',
builder_id='(not assigned)'):
"""
        assert that the given output string is equal to an in-tree stub file;
        if a test needs to check multiple outputs it can use custom exts
"""
filepath = os.path.abspath(
os.path.join(os.path.dirname(__file__), self.id().split('.')[-1]))
print(filepath)
filepath = '%s.%s' % (filepath, ext)
try:
with open(filepath, 'r') as f:
stub = f.read()
except (IOError, OSError) as e:
if e.errno == errno.ENOENT:
self.fail('%r does not exist' % filepath)
else:
self.fail('%r could not be read (%s)' % (filepath, e))
output = output.replace(self.tempfile, '__RINGFILE__')
stub = stub.replace('__BUILDER_ID__', builder_id)
for i, (value, expected) in enumerate(
zip_longest(output.splitlines(), stub.splitlines())):
# N.B. differences in trailing whitespace are ignored!
value = (value or '').rstrip()
expected = (expected or '').rstrip()
try:
self.assertEqual(value, expected)
except AssertionError:
msg = 'Line #%s value is not like expected:\n%r\n%r' % (
i, value, expected)
msg += '\n\nFull output was:\n'
for i, line in enumerate(output.splitlines()):
msg += '%3d: %s\n' % (i, line)
msg += '\n\nCompared to stub:\n'
for i, line in enumerate(stub.splitlines()):
msg += '%3d: %s\n' % (i, line)
self.fail(msg)
def create_sample_ring(self, part_power=6, replicas=3, overload=None,
empty=False):
"""
Create a sample ring with four devices
At least four devices are needed to test removing
        a device, since having fewer devices than replicas
is not allowed.
"""
# Ensure there is no existing test builder file because
# create_sample_ring() might be used more than once in a single test
try:
os.remove(self.tmpfile)
except OSError:
pass
ring = RingBuilder(part_power, replicas, 1)
if overload is not None:
ring.set_overload(overload)
if not empty:
ring.add_dev({'weight': 100.0,
'region': 0,
'zone': 0,
'ip': '127.0.0.1',
'port': 6200,
'device': 'sda1',
'meta': 'some meta data',
})
ring.add_dev({'weight': 100.0,
'region': 1,
'zone': 1,
'ip': '127.0.0.2',
'port': 6201,
'device': 'sda2'
})
ring.add_dev({'weight': 100.0,
'region': 2,
'zone': 2,
'ip': '127.0.0.3',
'port': 6202,
'device': 'sdc3'
})
ring.add_dev({'weight': 100.0,
'region': 3,
'zone': 3,
'ip': '127.0.0.4',
'port': 6203,
'device': 'sdd4'
})
ring.save(self.tmpfile)
return ring
def assertSystemExit(self, return_code, func, *argv):
with self.assertRaises(SystemExit) as cm:
func(*argv)
self.assertEqual(return_code, cm.exception.code)
def test_parse_search_values_old_format(self):
# Test old format
argv = ["d0r0z0-127.0.0.1:6200R127.0.0.1:6200/sda1_some meta data"]
search_values = ringbuilder._parse_search_values(argv)
self.assertEqual(search_values['id'], 0)
self.assertEqual(search_values['region'], 0)
self.assertEqual(search_values['zone'], 0)
self.assertEqual(search_values['ip'], '127.0.0.1')
self.assertEqual(search_values['port'], 6200)
self.assertEqual(search_values['replication_ip'], '127.0.0.1')
self.assertEqual(search_values['replication_port'], 6200)
self.assertEqual(search_values['device'], 'sda1')
self.assertEqual(search_values['meta'], 'some meta data')
def test_parse_search_values_new_format(self):
# Test new format
argv = ["--id", "0", "--region", "0", "--zone", "0",
"--ip", "127.0.0.1",
"--port", "6200",
"--replication-ip", "127.0.0.1",
"--replication-port", "6200",
"--device", "sda1", "--meta", "some meta data",
"--weight", "100"]
search_values = ringbuilder._parse_search_values(argv)
self.assertEqual(search_values['id'], 0)
self.assertEqual(search_values['region'], 0)
self.assertEqual(search_values['zone'], 0)
self.assertEqual(search_values['ip'], '127.0.0.1')
self.assertEqual(search_values['port'], 6200)
self.assertEqual(search_values['replication_ip'], '127.0.0.1')
self.assertEqual(search_values['replication_port'], 6200)
self.assertEqual(search_values['device'], 'sda1')
self.assertEqual(search_values['meta'], 'some meta data')
self.assertEqual(search_values['weight'], 100)
def test_parse_search_values_number_of_arguments(self):
# Test Number of arguments abnormal
argv = ["--region", "2", "test"]
self.assertSystemExit(
EXIT_ERROR, ringbuilder._parse_search_values, argv)
def test_find_parts(self):
rb = RingBuilder(8, 3, 0)
rb.add_dev({'id': 0, 'region': 1, 'zone': 0, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 1, 'zone': 0, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.add_dev({'id': 1, 'region': 1, 'zone': 1, 'weight': 100,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 1, 'zone': 1, 'weight': 100,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1'})
rb.add_dev({'id': 2, 'region': 1, 'zone': 2, 'weight': 100,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 1, 'zone': 2, 'weight': 100,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sdb1'})
rb.rebalance()
rb.add_dev({'id': 6, 'region': 2, 'zone': 1, 'weight': 10,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
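        # Pretend min_part_hours has elapsed so the rebalance below can
        # move partitions onto the newly added device immediately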
rb.pretend_min_part_hours_passed()
rb.rebalance()
ringbuilder.builder = rb
sorted_partition_count = ringbuilder._find_parts(
rb.search_devs({'ip': '127.0.0.1'}))
# Expect 256 partitions in the output
self.assertEqual(256, len(sorted_partition_count))
# Each partitions should have 3 replicas
for partition, count in sorted_partition_count:
self.assertEqual(
3, count, "Partition %d has only %d replicas" %
(partition, count))
def test_parse_list_parts_values_number_of_arguments(self):
# Test Number of arguments abnormal
argv = ["--region", "2", "test"]
self.assertSystemExit(
EXIT_ERROR, ringbuilder._parse_list_parts_values, argv)
def test_parse_add_values_number_of_arguments(self):
# Test Number of arguments abnormal
argv = ["--region", "2", "test"]
self.assertSystemExit(
EXIT_ERROR, ringbuilder._parse_add_values, argv)
def test_set_weight_values_no_devices(self):
# Test no devices
# _set_weight_values doesn't take argv-like arguments
self.assertSystemExit(
EXIT_ERROR, ringbuilder._set_weight_values, [], 100, {})
def test_parse_set_weight_values_number_of_arguments(self):
# Test Number of arguments abnormal
argv = ["r1", "100", "r2"]
self.assertSystemExit(
EXIT_ERROR, ringbuilder._parse_set_weight_values, argv)
argv = ["--region", "2"]
self.assertSystemExit(
EXIT_ERROR, ringbuilder._parse_set_weight_values, argv)
def test_set_region_values_no_devices(self):
# Test no devices
self.assertSystemExit(
EXIT_ERROR, ringbuilder._set_region_values, [], 100, {})
def test_parse_set_region_values_number_of_arguments(self):
# Test Number of arguments abnormal
argv = ["r1", "100", "r2"]
self.assertSystemExit(
EXIT_ERROR, ringbuilder._parse_set_region_values, argv)
argv = ["--region", "2"]
self.assertSystemExit(
EXIT_ERROR, ringbuilder._parse_set_region_values, argv)
def test_set_zone_values_no_devices(self):
# Test no devices
self.assertSystemExit(
EXIT_ERROR, ringbuilder._set_zone_values, [], 100, {})
def test_parse_set_zone_values_number_of_arguments(self):
# Test Number of arguments abnormal
argv = ["r1", "100", "r2"]
self.assertSystemExit(
EXIT_ERROR, ringbuilder._parse_set_zone_values, argv)
argv = ["--region", "2"]
self.assertSystemExit(
EXIT_ERROR, ringbuilder._parse_set_zone_values, argv)
def test_set_info_values_no_devices(self):
# Test no devices
# _set_info_values doesn't take argv-like arguments
self.assertSystemExit(
EXIT_ERROR, ringbuilder._set_info_values, [], 100, {})
def test_parse_set_info_values_number_of_arguments(self):
# Test Number of arguments abnormal
argv = ["r1", "127.0.0.1", "r2"]
self.assertSystemExit(
EXIT_ERROR, ringbuilder._parse_set_info_values, argv)
def test_parse_remove_values_number_of_arguments(self):
# Test Number of arguments abnormal
argv = ["--region", "2", "test"]
self.assertSystemExit(
EXIT_ERROR, ringbuilder._parse_remove_values, argv)
def test_create_ring(self):
argv = ["", self.tmpfile, "create", "6", "3.14159265359", "1"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.part_power, 6)
self.assertEqual(ring.replicas, 3.14159265359)
self.assertEqual(ring.min_part_hours, 1)
def test_create_ring_number_of_arguments(self):
# Test missing arguments
argv = ["", self.tmpfile, "create"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_add_device_ipv4_old_format(self):
self.create_sample_ring()
# Test ipv4(old format)
argv = ["", self.tmpfile, "add",
"r2z3-127.0.0.1:6200/sda3_some meta data", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Check that device was created with given data
ring = RingBuilder.load(self.tmpfile)
dev = ring.devs[-1]
self.assertEqual(dev['region'], 2)
self.assertEqual(dev['zone'], 3)
self.assertEqual(dev['ip'], '127.0.0.1')
self.assertEqual(dev['port'], 6200)
self.assertEqual(dev['device'], 'sda3')
self.assertEqual(dev['weight'], 3.14159265359)
self.assertEqual(dev['replication_ip'], '127.0.0.1')
self.assertEqual(dev['replication_port'], 6200)
self.assertEqual(dev['meta'], 'some meta data')
def test_add_duplicate_devices(self):
self.create_sample_ring()
# Test adding duplicate devices
argv = ["", self.tmpfile, "add",
"r1z1-127.0.0.1:6200/sda9", "3.14159265359",
"r1z1-127.0.0.1:6200/sda9", "2"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_add_device_ipv6_old_format(self):
self.create_sample_ring()
# Test ipv6(old format)
argv = \
["", self.tmpfile, "add",
"r2z3-2001:0000:1234:0000:0000:C1C0:ABCD:0876:6200"
"R2::10:7000/sda3_some meta data",
"3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Check that device was created with given data
ring = RingBuilder.load(self.tmpfile)
dev = ring.devs[-1]
self.assertEqual(dev['region'], 2)
self.assertEqual(dev['zone'], 3)
self.assertEqual(dev['ip'], '2001:0:1234::c1c0:abcd:876')
self.assertEqual(dev['port'], 6200)
self.assertEqual(dev['device'], 'sda3')
self.assertEqual(dev['weight'], 3.14159265359)
self.assertEqual(dev['replication_ip'], '2::10')
self.assertEqual(dev['replication_port'], 7000)
self.assertEqual(dev['meta'], 'some meta data')
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_add_device_ipv4_new_format(self):
self.create_sample_ring()
# Test ipv4(new format)
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "127.0.0.2",
"--port", "6200",
"--replication-ip", "127.0.0.2",
"--replication-port", "6200",
"--device", "sda3", "--meta", "some meta data",
"--weight", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Check that device was created with given data
ring = RingBuilder.load(self.tmpfile)
dev = ring.devs[-1]
self.assertEqual(dev['region'], 2)
self.assertEqual(dev['zone'], 3)
self.assertEqual(dev['ip'], '127.0.0.2')
self.assertEqual(dev['port'], 6200)
self.assertEqual(dev['device'], 'sda3')
self.assertEqual(dev['weight'], 3.14159265359)
self.assertEqual(dev['replication_ip'], '127.0.0.2')
self.assertEqual(dev['replication_port'], 6200)
self.assertEqual(dev['meta'], 'some meta data')
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_add_device_ipv6_new_format(self):
self.create_sample_ring()
# Test ipv6(new format)
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "[3001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6200",
"--replication-ip", "[3::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Check that device was created with given data
ring = RingBuilder.load(self.tmpfile)
dev = ring.devs[-1]
self.assertEqual(dev['region'], 2)
self.assertEqual(dev['zone'], 3)
self.assertEqual(dev['ip'], '3001:0:1234::c1c0:abcd:876')
self.assertEqual(dev['port'], 6200)
self.assertEqual(dev['device'], 'sda3')
self.assertEqual(dev['weight'], 3.14159265359)
self.assertEqual(dev['replication_ip'], '3::10')
self.assertEqual(dev['replication_port'], 7000)
self.assertEqual(dev['meta'], 'some meta data')
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_add_device_domain_new_format(self):
self.create_sample_ring()
# Test domain name
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6200",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Check that device was created with given data
ring = RingBuilder.load(self.tmpfile)
dev = ring.devs[-1]
self.assertEqual(dev['region'], 2)
self.assertEqual(dev['zone'], 3)
self.assertEqual(dev['ip'], 'test.test.com')
self.assertEqual(dev['port'], 6200)
self.assertEqual(dev['device'], 'sda3')
self.assertEqual(dev['weight'], 3.14159265359)
self.assertEqual(dev['replication_ip'], 'r.test.com')
self.assertEqual(dev['replication_port'], 7000)
self.assertEqual(dev['meta'], 'some meta data')
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_add_device_number_of_arguments(self):
# Test Number of arguments abnormal
argv = ["", self.tmpfile, "add"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_add_device_already_exists(self):
# Test Add a device that already exists
argv = ["", self.tmpfile, "add",
"r0z0-127.0.0.1:6200/sda1_some meta data", "100"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_add_device_old_missing_region(self):
self.create_sample_ring()
# Test add device without specifying a region
argv = ["", self.tmpfile, "add",
"z3-127.0.0.1:6200/sde3_some meta data", "3.14159265359"]
exp_results = {'valid_exit_codes': [2]}
self.run_srb(*argv, exp_results=exp_results)
# Check that ring was created with sane value for region
ring = RingBuilder.load(self.tmpfile)
dev = ring.devs[-1]
self.assertGreater(dev['region'], 0)
def test_add_device_part_power_increase(self):
self.create_sample_ring()
ring = RingBuilder.load(self.tmpfile)
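        # Mark a partition power increase as pending; adding a device
        # must be refused while it is in progress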
ring.next_part_power = 1
ring.save(self.tmpfile)
argv = ["", self.tmpfile, "add",
"r0z0-127.0.1.1:6200/sda1_some meta data", "100"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_remove_device(self):
for search_value in self.search_values:
self.create_sample_ring()
argv = ["", self.tmpfile, "remove", search_value]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
# Check that weight was set to 0
dev = ring.devs[0]
self.assertEqual(dev['weight'], 0)
# Check that device is in list of devices to be removed
self.assertEqual(dev['region'], 0)
self.assertEqual(dev['zone'], 0)
self.assertEqual(dev['ip'], '127.0.0.1')
self.assertEqual(dev['port'], 6200)
self.assertEqual(dev['device'], 'sda1')
self.assertEqual(dev['weight'], 0)
self.assertEqual(dev['replication_ip'], '127.0.0.1')
self.assertEqual(dev['replication_port'], 6200)
self.assertEqual(dev['meta'], 'some meta data')
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['weight'], 100)
self.assertFalse([d for d in ring._remove_devs if d['id'] == 1])
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_remove_device_ipv4_old_format(self):
self.create_sample_ring()
# Test ipv4(old format)
argv = ["", self.tmpfile, "remove",
"d0r0z0-127.0.0.1:6200R127.0.0.1:6200/sda1_some meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
# Check that weight was set to 0
dev = ring.devs[0]
self.assertEqual(dev['weight'], 0)
# Check that device is in list of devices to be removed
self.assertEqual(dev['region'], 0)
self.assertEqual(dev['zone'], 0)
self.assertEqual(dev['ip'], '127.0.0.1')
self.assertEqual(dev['port'], 6200)
self.assertEqual(dev['device'], 'sda1')
self.assertEqual(dev['weight'], 0)
self.assertEqual(dev['replication_ip'], '127.0.0.1')
self.assertEqual(dev['replication_port'], 6200)
self.assertEqual(dev['meta'], 'some meta data')
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['weight'], 100)
self.assertFalse([d for d in ring._remove_devs if d['id'] == 1])
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_remove_device_ipv6_old_format(self):
self.create_sample_ring()
# add IPV6
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6200",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Test ipv6(old format)
argv = ["", self.tmpfile, "remove",
"d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6200"
"R[2::10]:7000/sda3_some meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
        # Check that first device in ring is not affected
dev = ring.devs[0]
self.assertEqual(dev['weight'], 100)
self.assertFalse([d for d in ring._remove_devs if d['id'] == 0])
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['weight'], 100)
self.assertFalse([d for d in ring._remove_devs if d['id'] == 1])
# Check that weight was set to 0
dev = ring.devs[-1]
self.assertEqual(dev['weight'], 0)
# Check that device is in list of devices to be removed
self.assertEqual(dev['region'], 2)
self.assertEqual(dev['zone'], 3)
self.assertEqual(dev['ip'], '2001:0:1234::c1c0:abcd:876')
self.assertEqual(dev['port'], 6200)
self.assertEqual(dev['device'], 'sda3')
self.assertEqual(dev['weight'], 0)
self.assertEqual(dev['replication_ip'], '2::10')
self.assertEqual(dev['replication_port'], 7000)
self.assertEqual(dev['meta'], 'some meta data')
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_remove_device_ipv4_new_format(self):
self.create_sample_ring()
# Test ipv4(new format)
argv = \
["", self.tmpfile, "remove",
"--id", "0", "--region", "0", "--zone", "0",
"--ip", "127.0.0.1",
"--port", "6200",
"--replication-ip", "127.0.0.1",
"--replication-port", "6200",
"--device", "sda1", "--meta", "some meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
# Check that weight was set to 0
dev = ring.devs[0]
self.assertEqual(dev['weight'], 0)
# Check that device is in list of devices to be removed
self.assertEqual(dev['region'], 0)
self.assertEqual(dev['zone'], 0)
self.assertEqual(dev['ip'], '127.0.0.1')
self.assertEqual(dev['port'], 6200)
self.assertEqual(dev['device'], 'sda1')
self.assertEqual(dev['weight'], 0)
self.assertEqual(dev['replication_ip'], '127.0.0.1')
self.assertEqual(dev['replication_port'], 6200)
self.assertEqual(dev['meta'], 'some meta data')
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['weight'], 100)
self.assertFalse([d for d in ring._remove_devs if d['id'] == 1])
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_remove_device_ipv6_new_format(self):
self.create_sample_ring()
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "[3001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "8000",
"--replication-ip", "[3::10]",
"--replication-port", "9000",
"--device", "sda30", "--meta", "other meta data",
"--weight", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Test ipv6(new format)
argv = \
["", self.tmpfile, "remove",
"--id", "4", "--region", "2", "--zone", "3",
"--ip", "[3001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "8000",
"--replication-ip", "[3::10]",
"--replication-port", "9000",
"--device", "sda30", "--meta", "other meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
        # Check that first device in ring is not affected
dev = ring.devs[0]
self.assertEqual(dev['weight'], 100)
self.assertFalse([d for d in ring._remove_devs if d['id'] == 0])
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['weight'], 100)
self.assertFalse([d for d in ring._remove_devs if d['id'] == 1])
# Check that weight was set to 0
dev = ring.devs[-1]
self.assertEqual(dev['weight'], 0)
# Check that device is in list of devices to be removed
self.assertEqual(dev['region'], 2)
self.assertEqual(dev['zone'], 3)
self.assertEqual(dev['ip'], '3001:0:1234::c1c0:abcd:876')
self.assertEqual(dev['port'], 8000)
self.assertEqual(dev['device'], 'sda30')
self.assertEqual(dev['weight'], 0)
self.assertEqual(dev['replication_ip'], '3::10')
self.assertEqual(dev['replication_port'], 9000)
self.assertEqual(dev['meta'], 'other meta data')
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_remove_device_domain_new_format(self):
self.create_sample_ring()
# add domain name
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6200",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Test domain name
argv = \
["", self.tmpfile, "remove",
"--id", "4", "--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6200",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
        # Check that first device in ring is not affected
dev = ring.devs[0]
self.assertEqual(dev['weight'], 100)
self.assertFalse([d for d in ring._remove_devs if d['id'] == 0])
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['weight'], 100)
self.assertFalse([d for d in ring._remove_devs if d['id'] == 1])
# Check that weight was set to 0
dev = ring.devs[-1]
self.assertEqual(dev['weight'], 0)
# Check that device is in list of devices to be removed
self.assertEqual(dev['region'], 2)
self.assertEqual(dev['zone'], 3)
self.assertEqual(dev['ip'], 'test.test.com')
self.assertEqual(dev['port'], 6200)
self.assertEqual(dev['device'], 'sda3')
self.assertEqual(dev['weight'], 0)
self.assertEqual(dev['replication_ip'], 'r.test.com')
self.assertEqual(dev['replication_port'], 7000)
self.assertEqual(dev['meta'], 'some meta data')
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_remove_device_number_of_arguments(self):
self.create_sample_ring()
# Test Number of arguments abnormal
argv = ["", self.tmpfile, "remove"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_remove_device_no_matching(self):
self.create_sample_ring()
# Test No matching devices
argv = ["", self.tmpfile, "remove",
"--ip", "unknown"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_remove_device_part_power_increase(self):
self.create_sample_ring()
ring = RingBuilder.load(self.tmpfile)
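        # Mark a partition power increase as pending; removing a device
        # must be refused while it is in progress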
ring.next_part_power = 1
ring.save(self.tmpfile)
argv = ["", self.tmpfile, "remove", "d0"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_set_weight(self):
for search_value in self.search_values:
self.create_sample_ring()
argv = ["", self.tmpfile, "set_weight",
search_value, "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
# Check that weight was changed
dev = ring.devs[0]
self.assertEqual(dev['weight'], 3.14159265359)
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['weight'], 100)
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_set_weight_old_format_two_devices(self):
# Would block without the 'yes' argument
self.create_sample_ring()
argv = ["", self.tmpfile, "set_weight",
"d2", "3.14", "d1", "6.28", "--yes"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
# Check that weight was changed
self.assertEqual(ring.devs[2]['weight'], 3.14)
self.assertEqual(ring.devs[1]['weight'], 6.28)
# Check that other devices in ring are not affected
self.assertEqual(ring.devs[0]['weight'], 100)
self.assertEqual(ring.devs[3]['weight'], 100)
def test_set_weight_ipv4_old_format(self):
self.create_sample_ring()
# Test ipv4(old format)
argv = ["", self.tmpfile, "set_weight",
"d0r0z0-127.0.0.1:6200R127.0.0.1:6200/sda1_some meta data",
"3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
# Check that weight was changed
dev = ring.devs[0]
self.assertEqual(dev['weight'], 3.14159265359)
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['weight'], 100)
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_set_weight_ipv6_old_format(self):
self.create_sample_ring()
# add IPV6
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6200",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "100"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Test ipv6(old format)
argv = ["", self.tmpfile, "set_weight",
"d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6200"
"R[2::10]:7000/sda3_some meta data", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
        # Check that first device in ring is not affected
dev = ring.devs[0]
self.assertEqual(dev['weight'], 100)
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['weight'], 100)
# Check that weight was changed
dev = ring.devs[-1]
self.assertEqual(dev['weight'], 3.14159265359)
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_set_weight_ipv4_new_format(self):
self.create_sample_ring()
# Test ipv4(new format)
argv = \
["", self.tmpfile, "set_weight",
"--id", "0", "--region", "0", "--zone", "0",
"--ip", "127.0.0.1",
"--port", "6200",
"--replication-ip", "127.0.0.1",
"--replication-port", "6200",
"--device", "sda1", "--meta", "some meta data", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
# Check that weight was changed
dev = ring.devs[0]
self.assertEqual(dev['weight'], 3.14159265359)
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['weight'], 100)
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_set_weight_ipv6_new_format(self):
self.create_sample_ring()
# add IPV6
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6200",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "100"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Test ipv6(new format)
argv = \
["", self.tmpfile, "set_weight",
"--id", "4", "--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6200",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
        # Check that first device in ring is not affected
dev = ring.devs[0]
self.assertEqual(dev['weight'], 100)
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['weight'], 100)
# Check that weight was changed
dev = ring.devs[-1]
self.assertEqual(dev['weight'], 3.14159265359)
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_set_weight_domain_new_format(self):
self.create_sample_ring()
# add domain name
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6200",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "100"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Test domain name
argv = \
["", self.tmpfile, "set_weight",
"--id", "4", "--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6200",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
        # Check that first device in ring is not affected
dev = ring.devs[0]
self.assertEqual(dev['weight'], 100)
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['weight'], 100)
# Check that weight was changed
dev = ring.devs[-1]
self.assertEqual(dev['weight'], 3.14159265359)
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_set_weight_number_of_arguments(self):
self.create_sample_ring()
# Test Number of arguments abnormal
argv = ["", self.tmpfile, "set_weight"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_set_weight_no_matching(self):
self.create_sample_ring()
# Test No matching devices
argv = ["", self.tmpfile, "set_weight",
"--ip", "unknown"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def _check_region(self, ring, dev_id, expected_region):
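        # Assert that only the device with the given dev_id was assigned
        # expected_region, then rebalance and validate the ring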
for dev in ring.devs:
if dev['id'] != dev_id:
self.assertNotEqual(dev['region'], expected_region)
else:
self.assertEqual(dev['region'], expected_region)
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_set_region(self):
for search_value in self.search_values:
self.create_sample_ring()
argv = ["", self.tmpfile, "set_region",
search_value, "314"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self._check_region(ring, 0, 314)
def test_set_region_ipv4_old_format(self):
self.create_sample_ring()
# Test ipv4(old format)
argv = ["", self.tmpfile, "set_region",
"d0r0z0-127.0.0.1:6200R127.0.0.1:6200/sda1_some meta data",
"314"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self._check_region(ring, 0, 314)
def test_set_region_ipv6_old_format(self):
self.create_sample_ring()
# add IPV6
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6000",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "100"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Test ipv6(old format)
argv = ["", self.tmpfile, "set_region",
"d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6000"
"R[2::10]:7000/sda3_some meta data", "314"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self._check_region(ring, 4, 314)
def test_set_region_ipv4_new_format(self):
self.create_sample_ring()
# Test ipv4(new format)
argv = \
["", self.tmpfile, "set_region",
"--id", "0", "--region", "0", "--zone", "0",
"--ip", "127.0.0.1",
"--port", "6200",
"--replication-ip", "127.0.0.1",
"--replication-port", "6200",
"--device", "sda1", "--meta", "some meta data", "314"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self._check_region(ring, 0, 314)
def test_set_region_ipv6_new_format(self):
self.create_sample_ring()
# add IPV6
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6000",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "100"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Test ipv6(new format)
argv = \
["", self.tmpfile, "set_region",
"--id", "4", "--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6000",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data", "314"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self._check_region(ring, 4, 314)
def test_set_region_domain_new_format(self):
self.create_sample_ring()
# add domain name
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6000",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "100"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Test domain name
argv = \
["", self.tmpfile, "set_region",
"--id", "4", "--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6000",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data", "314"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self._check_region(ring, 4, 314)
def test_set_region_number_of_arguments(self):
self.create_sample_ring()
# Test Number of arguments abnormal
argv = ["", self.tmpfile, "set_region"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_set_region_no_matching(self):
self.create_sample_ring()
# Test No matching devices
argv = ["", self.tmpfile, "set_region",
"--ip", "unknown"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_set_zone(self):
for search_value in self.search_values:
self.create_sample_ring()
argv = ["", self.tmpfile, "set_zone",
search_value, "314"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self._check_zone(ring, 0, 314)
def test_set_zone_ipv4_old_format(self):
self.create_sample_ring()
# Test ipv4(old format)
argv = ["", self.tmpfile, "set_zone",
"d0r0z0-127.0.0.1:6200R127.0.0.1:6200/sda1_some meta data",
"314"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self._check_zone(ring, 0, 314)
def _check_zone(self, ring, dev_id, expected_zone):
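        # Assert that only the device with the given dev_id was assigned
        # expected_zone, then rebalance and validate the ring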
for dev in ring.devs:
if dev['id'] != dev_id:
                self.assertNotEqual(dev['zone'], expected_zone)
else:
self.assertEqual(dev['zone'], expected_zone)
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_set_zone_ipv6_old_format(self):
self.create_sample_ring()
# add IPV6
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6000",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "100"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Test ipv6(old format)
argv = ["", self.tmpfile, "set_zone",
"d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6000"
"R[2::10]:7000/sda3_some meta data", "314"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self._check_zone(ring, 4, 314)
def test_set_zone_ipv4_new_format(self):
self.create_sample_ring()
# Test ipv4(new format)
argv = \
["", self.tmpfile, "set_zone",
"--id", "0", "--region", "0", "--zone", "0",
"--ip", "127.0.0.1",
"--port", "6200",
"--replication-ip", "127.0.0.1",
"--replication-port", "6200",
"--device", "sda1", "--meta", "some meta data", "314"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self._check_zone(ring, 0, 314)
def test_set_zone_ipv6_new_format(self):
self.create_sample_ring()
# add IPV6
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6000",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "100"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Test ipv6(new format)
argv = \
["", self.tmpfile, "set_zone",
"--id", "4", "--region", "2",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6000",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data", "314"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self._check_zone(ring, 4, 314)
def test_set_zone_domain_new_format(self):
self.create_sample_ring()
# add domain name
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6000",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "100"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Test domain name
argv = \
["", self.tmpfile, "set_zone",
"--id", "4", "--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6000",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data", "314"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self._check_zone(ring, 4, 314)
def test_set_zone_number_of_arguments(self):
self.create_sample_ring()
# Test Number of arguments abnormal
argv = ["", self.tmpfile, "set_zone"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_set_zone_no_matching(self):
self.create_sample_ring()
# Test No matching devices
argv = ["", self.tmpfile, "set_zone",
"--ip", "unknown"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_set_info(self):
for search_value in self.search_values:
self.create_sample_ring()
argv = ["", self.tmpfile, "set_info", search_value,
"127.0.1.1:8000/sda1_other meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Check that device was created with given data
ring = RingBuilder.load(self.tmpfile)
dev = ring.devs[0]
self.assertEqual(dev['ip'], '127.0.1.1')
self.assertEqual(dev['port'], 8000)
self.assertEqual(dev['device'], 'sda1')
self.assertEqual(dev['meta'], 'other meta data')
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['ip'], '127.0.0.2')
self.assertEqual(dev['port'], 6201)
self.assertEqual(dev['device'], 'sda2')
self.assertEqual(dev['meta'], '')
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_set_info_ipv4_old_format(self):
self.create_sample_ring()
# Test ipv4(old format)
argv = ["", self.tmpfile, "set_info",
"d0r0z0-127.0.0.1:6200R127.0.0.1:6200/sda1_some meta data",
"127.0.1.1:8000R127.0.1.1:8000/sda10_other meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Check that device was created with given data
ring = RingBuilder.load(self.tmpfile)
dev = ring.devs[0]
self.assertEqual(dev['ip'], '127.0.1.1')
self.assertEqual(dev['port'], 8000)
self.assertEqual(dev['replication_ip'], '127.0.1.1')
self.assertEqual(dev['replication_port'], 8000)
self.assertEqual(dev['device'], 'sda10')
self.assertEqual(dev['meta'], 'other meta data')
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['ip'], '127.0.0.2')
self.assertEqual(dev['port'], 6201)
self.assertEqual(dev['device'], 'sda2')
self.assertEqual(dev['meta'], '')
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_set_info_ipv6_old_format(self):
self.create_sample_ring()
# add IPV6
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6200",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Test ipv6(old format)
argv = ["", self.tmpfile, "set_info",
"d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6200"
"R[2::10]:7000/sda3_some meta data",
"[3001:0000:1234:0000:0000:C1C0:ABCD:0876]:8000"
"R[3::10]:8000/sda30_other meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
# Check that second device in ring is not affected
dev = ring.devs[0]
self.assertEqual(dev['ip'], '127.0.0.1')
self.assertEqual(dev['port'], 6200)
self.assertEqual(dev['replication_ip'], '127.0.0.1')
self.assertEqual(dev['replication_port'], 6200)
self.assertEqual(dev['device'], 'sda1')
self.assertEqual(dev['meta'], 'some meta data')
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['ip'], '127.0.0.2')
self.assertEqual(dev['port'], 6201)
self.assertEqual(dev['device'], 'sda2')
self.assertEqual(dev['meta'], '')
# Check that device was created with given data
dev = ring.devs[-1]
self.assertEqual(dev['ip'], '3001:0:1234::c1c0:abcd:876')
self.assertEqual(dev['port'], 8000)
self.assertEqual(dev['replication_ip'], '3::10')
self.assertEqual(dev['replication_port'], 8000)
self.assertEqual(dev['device'], 'sda30')
self.assertEqual(dev['meta'], 'other meta data')
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_set_info_ipv4_new_format(self):
self.create_sample_ring()
# Test ipv4(new format)
argv = \
["", self.tmpfile, "set_info",
"--id", "0", "--region", "0", "--zone", "0",
"--ip", "127.0.0.1",
"--port", "6200",
"--replication-ip", "127.0.0.1",
"--replication-port", "6200",
"--device", "sda1", "--meta", "some meta data",
"--change-ip", "127.0.2.1",
"--change-port", "9000",
"--change-replication-ip", "127.0.2.1",
"--change-replication-port", "9000",
"--change-device", "sda100", "--change-meta", "other meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Check that device was created with given data
ring = RingBuilder.load(self.tmpfile)
dev = ring.devs[0]
self.assertEqual(dev['ip'], '127.0.2.1')
self.assertEqual(dev['port'], 9000)
self.assertEqual(dev['replication_ip'], '127.0.2.1')
self.assertEqual(dev['replication_port'], 9000)
self.assertEqual(dev['device'], 'sda100')
self.assertEqual(dev['meta'], 'other meta data')
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['ip'], '127.0.0.2')
self.assertEqual(dev['port'], 6201)
self.assertEqual(dev['device'], 'sda2')
self.assertEqual(dev['meta'], '')
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_set_info_ipv6_new_format(self):
self.create_sample_ring()
# add IPV6
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6200",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Test ipv6(new format)
argv = \
["", self.tmpfile, "set_info",
"--id", "4", "--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6200",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--change-ip", "[4001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--change-port", "9000",
"--change-replication-ip", "[4::10]",
"--change-replication-port", "9000",
"--change-device", "sda300", "--change-meta", "other meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
# Check that second device in ring is not affected
dev = ring.devs[0]
self.assertEqual(dev['ip'], '127.0.0.1')
self.assertEqual(dev['port'], 6200)
self.assertEqual(dev['replication_ip'], '127.0.0.1')
self.assertEqual(dev['replication_port'], 6200)
self.assertEqual(dev['device'], 'sda1')
self.assertEqual(dev['meta'], 'some meta data')
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['ip'], '127.0.0.2')
self.assertEqual(dev['port'], 6201)
self.assertEqual(dev['device'], 'sda2')
self.assertEqual(dev['meta'], '')
# Check that device was created with given data
ring = RingBuilder.load(self.tmpfile)
dev = ring.devs[-1]
self.assertEqual(dev['ip'], '4001:0:1234::c1c0:abcd:876')
self.assertEqual(dev['port'], 9000)
self.assertEqual(dev['replication_ip'], '4::10')
self.assertEqual(dev['replication_port'], 9000)
self.assertEqual(dev['device'], 'sda300')
self.assertEqual(dev['meta'], 'other meta data')
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_set_info_domain_new_format(self):
self.create_sample_ring()
# add domain name
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6200",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# Test domain name
argv = \
["", self.tmpfile, "set_info",
"--id", "4", "--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6200",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--change-ip", "test.test2.com",
"--change-port", "9000",
"--change-replication-ip", "r.test2.com",
"--change-replication-port", "9000",
"--change-device", "sda300", "--change-meta", "other meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
# Check that second device in ring is not affected
dev = ring.devs[0]
self.assertEqual(dev['ip'], '127.0.0.1')
self.assertEqual(dev['port'], 6200)
self.assertEqual(dev['replication_ip'], '127.0.0.1')
self.assertEqual(dev['replication_port'], 6200)
self.assertEqual(dev['device'], 'sda1')
self.assertEqual(dev['meta'], 'some meta data')
# Check that second device in ring is not affected
dev = ring.devs[1]
self.assertEqual(dev['ip'], '127.0.0.2')
self.assertEqual(dev['port'], 6201)
self.assertEqual(dev['device'], 'sda2')
self.assertEqual(dev['meta'], '')
# Check that device was created with given data
dev = ring.devs[-1]
self.assertEqual(dev['ip'], 'test.test2.com')
self.assertEqual(dev['port'], 9000)
self.assertEqual(dev['replication_ip'], 'r.test2.com')
self.assertEqual(dev['replication_port'], 9000)
self.assertEqual(dev['device'], 'sda300')
self.assertEqual(dev['meta'], 'other meta data')
# Final check, rebalance and check ring is ok
ring.rebalance()
self.assertTrue(ring.validate())
def test_set_info_number_of_arguments(self):
self.create_sample_ring()
        # Test with an abnormal number of arguments
argv = ["", self.tmpfile, "set_info"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_set_info_no_matching(self):
self.create_sample_ring()
# Test No matching devices
argv = ["", self.tmpfile, "set_info",
"--ip", "unknown"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_set_info_already_exists(self):
self.create_sample_ring()
        # Test setting a device's info to values already used by another device
argv = \
["", self.tmpfile, "set_info",
"--id", "0", "--region", "0", "--zone", "0",
"--ip", "127.0.0.1",
"--port", "6200",
"--replication-ip", "127.0.0.1",
"--replication-port", "6200",
"--device", "sda1", "--meta", "some meta data",
"--change-ip", "127.0.0.2",
"--change-port", "6201",
"--change-replication-ip", "127.0.0.2",
"--change-replication-port", "6201",
"--change-device", "sda2", "--change-meta", ""]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_set_min_part_hours(self):
self.create_sample_ring()
argv = ["", self.tmpfile, "set_min_part_hours", "24"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.min_part_hours, 24)
def test_set_min_part_hours_number_of_arguments(self):
self.create_sample_ring()
        # Test with an abnormal number of arguments
argv = ["", self.tmpfile, "set_min_part_hours"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_set_replicas(self):
self.create_sample_ring()
argv = ["", self.tmpfile, "set_replicas", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.replicas, 3.14159265359)
def test_set_overload(self):
self.create_sample_ring()
argv = ["", self.tmpfile, "set_overload", "0.19878"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.overload, 0.19878)
def test_set_overload_negative(self):
self.create_sample_ring()
argv = ["", self.tmpfile, "set_overload", "-0.19878"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.overload, 0.0)
def test_set_overload_non_numeric(self):
self.create_sample_ring()
argv = ["", self.tmpfile, "set_overload", "swedish fish"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.overload, 0.0)
def test_set_overload_percent(self):
self.create_sample_ring()
argv = "set_overload 10%".split()
out, err = self.run_srb(*argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.overload, 0.1)
self.assertIn('10.00%', out)
self.assertIn('0.100000', out)
def test_set_overload_percent_strange_input(self):
self.create_sample_ring()
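        # the extra trailing '%' characters should be tolerated and the value
        # still parsed as 26%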
argv = "set_overload 26%%%%".split()
out, err = self.run_srb(*argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.overload, 0.26)
self.assertIn('26.00%', out)
self.assertIn('0.260000', out)
def test_server_overload_crazy_high(self):
self.create_sample_ring()
argv = "set_overload 10".split()
out, err = self.run_srb(*argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.overload, 10.0)
self.assertIn('Warning overload is greater than 100%', out)
self.assertIn('1000.00%', out)
self.assertIn('10.000000', out)
# but it's cool if you do it on purpose
argv[-1] = '1000%'
out, err = self.run_srb(*argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.overload, 10.0)
self.assertNotIn('Warning overload is greater than 100%', out)
self.assertIn('1000.00%', out)
self.assertIn('10.000000', out)
def test_set_overload_number_of_arguments(self):
self.create_sample_ring()
# Test missing arguments
argv = ["", self.tmpfile, "set_overload"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_set_replicas_number_of_arguments(self):
self.create_sample_ring()
        # Test with an abnormal number of arguments
argv = ["", self.tmpfile, "set_replicas"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_set_replicas_invalid_value(self):
self.create_sample_ring()
# Test not a valid number
argv = ["", self.tmpfile, "set_replicas", "test"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
# Test new replicas is 0
argv = ["", self.tmpfile, "set_replicas", "0"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_validate(self):
self.create_sample_ring()
ring = RingBuilder.load(self.tmpfile)
ring.rebalance()
ring.save(self.tmpfile)
argv = ["", self.tmpfile, "validate"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_validate_composite_builder_file(self):
b1, b1_file = write_stub_builder(self.tmpdir, 1)
b2, b2_file = write_stub_builder(self.tmpdir, 2)
cb = CompositeRingBuilder([b1_file, b2_file])
cb.compose()
cb_file = os.path.join(self.tmpdir, 'composite.builder')
cb.save(cb_file)
argv = ["", cb_file, "validate"]
with mock.patch("sys.stdout", six.StringIO()) as mock_stdout:
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
lines = mock_stdout.getvalue().strip().split('\n')
self.assertIn("Ring Builder file is invalid", lines[0])
self.assertIn("appears to be a composite ring builder file", lines[0])
self.assertFalse(lines[1:])
def test_validate_empty_file(self):
        open(self.tmpfile, 'a').close()
argv = ["", self.tmpfile, "validate"]
with mock.patch("sys.stdout", six.StringIO()) as mock_stdout:
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
lines = mock_stdout.getvalue().strip().split('\n')
self.assertIn("Ring Builder file is invalid", lines[0])
self.assertNotIn("appears to be a composite ring builder file",
lines[0])
self.assertFalse(lines[1:])
def test_validate_corrupted_file(self):
self.create_sample_ring()
ring = RingBuilder.load(self.tmpfile)
ring.rebalance()
self.assertTrue(ring.validate()) # ring is valid until now
ring.save(self.tmpfile)
argv = ["", self.tmpfile, "validate"]
# corrupt the file
with open(self.tmpfile, 'wb') as f:
f.write(os.urandom(1024))
with mock.patch("sys.stdout", six.StringIO()) as mock_stdout:
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
lines = mock_stdout.getvalue().strip().split('\n')
self.assertIn("Ring Builder file is invalid", lines[0])
self.assertNotIn("appears to be a composite ring builder file",
lines[0])
self.assertFalse(lines[1:])
def test_validate_non_existent_file(self):
rand_file = '%s/%s' % (tempfile.gettempdir(), str(uuid.uuid4()))
argv = ["", rand_file, "validate"]
with mock.patch("sys.stdout", six.StringIO()) as mock_stdout:
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
lines = mock_stdout.getvalue().strip().split('\n')
self.assertIn("Ring Builder file does not exist", lines[0])
self.assertNotIn("appears to be a composite ring builder file",
lines[0])
self.assertFalse(lines[1:])
def test_validate_non_accessible_file(self):
with mock.patch.object(
RingBuilder, 'load',
mock.Mock(side_effect=exceptions.PermissionError("boom"))):
argv = ["", self.tmpfile, "validate"]
with mock.patch("sys.stdout", six.StringIO()) as mock_stdout:
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
lines = mock_stdout.getvalue().strip().split('\n')
self.assertIn("boom", lines[0])
self.assertFalse(lines[1:])
def test_validate_generic_error(self):
with mock.patch.object(
RingBuilder, 'load', mock.Mock(
side_effect=IOError('Generic error occurred'))):
argv = ["", self.tmpfile, "validate"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_search_device_ipv4_old_format(self):
self.create_sample_ring()
# Test ipv4(old format)
argv = ["", self.tmpfile, "search",
"d0r0z0-127.0.0.1:6200R127.0.0.1:6200/sda1_some meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_search_device_ipv6_old_format(self):
self.create_sample_ring()
# add IPV6
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6200",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# write ring file
ring = RingBuilder.load(self.tmpfile)
ring.rebalance()
ring.save(self.tmpfile)
# Test ipv6(old format)
argv = ["", self.tmpfile, "search",
"d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6200"
"R[2::10]:7000/sda3_some meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_search_device_ipv4_new_format(self):
self.create_sample_ring()
# Test ipv4(new format)
argv = \
["", self.tmpfile, "search",
"--id", "0", "--region", "0", "--zone", "0",
"--ip", "127.0.0.1",
"--port", "6200",
"--replication-ip", "127.0.0.1",
"--replication-port", "6200",
"--device", "sda1", "--meta", "some meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_search_device_ipv6_new_format(self):
self.create_sample_ring()
# add IPV6
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6200",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# write ring file
ring = RingBuilder.load(self.tmpfile)
ring.rebalance()
ring.save(self.tmpfile)
# Test ipv6(new format)
argv = \
["", self.tmpfile, "search",
"--id", "4", "--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6200",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_search_device_domain_new_format(self):
self.create_sample_ring()
# add domain name
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6200",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# write ring file
ring = RingBuilder.load(self.tmpfile)
ring.rebalance()
ring.save(self.tmpfile)
# Test domain name
argv = \
["", self.tmpfile, "search",
"--id", "4", "--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6200",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_search_device_number_of_arguments(self):
self.create_sample_ring()
        # Test with an abnormal number of arguments
argv = ["", self.tmpfile, "search"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_search_device_no_matching(self):
self.create_sample_ring()
# Test No matching devices
argv = ["", self.tmpfile, "search",
"--ip", "unknown"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_list_parts_ipv4_old_format(self):
self.create_sample_ring()
ring = RingBuilder.load(self.tmpfile)
ring.rebalance()
ring.save(self.tmpfile)
# Test ipv4(old format)
argv = ["", self.tmpfile, "list_parts",
"d0r0z0-127.0.0.1:6200R127.0.0.1:6200/sda1_some meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_list_parts_ipv6_old_format(self):
self.create_sample_ring()
# add IPV6
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6200",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# write ring file
ring = RingBuilder.load(self.tmpfile)
ring.rebalance()
ring.save(self.tmpfile)
# Test ipv6(old format)
argv = ["", self.tmpfile, "list_parts",
"d4r2z3-[2001:0000:1234:0000:0000:C1C0:ABCD:0876]:6200"
"R[2::10]:7000/sda3_some meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_list_parts_ipv4_new_format(self):
self.create_sample_ring()
ring = RingBuilder.load(self.tmpfile)
ring.rebalance()
ring.save(self.tmpfile)
# Test ipv4(new format)
argv = \
["", self.tmpfile, "list_parts",
"--id", "0", "--region", "0", "--zone", "0",
"--ip", "127.0.0.1",
"--port", "6200",
"--replication-ip", "127.0.0.1",
"--replication-port", "6200",
"--device", "sda1", "--meta", "some meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_list_parts_ipv6_new_format(self):
self.create_sample_ring()
# add IPV6
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6200",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# write ring file
ring = RingBuilder.load(self.tmpfile)
ring.rebalance()
ring.save(self.tmpfile)
# Test ipv6(new format)
argv = \
["", self.tmpfile, "list_parts",
"--id", "4", "--region", "2", "--zone", "3",
"--ip", "[2001:0000:1234:0000:0000:C1C0:ABCD:0876]",
"--port", "6200",
"--replication-ip", "[2::10]",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_list_parts_domain_new_format(self):
self.create_sample_ring()
# add domain name
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6200",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data",
"--weight", "3.14159265359"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# write ring file
ring = RingBuilder.load(self.tmpfile)
ring.rebalance()
ring.save(self.tmpfile)
# Test domain name
argv = \
["", self.tmpfile, "list_parts",
"--id", "4", "--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6200",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3", "--meta", "some meta data"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_list_parts_number_of_arguments(self):
self.create_sample_ring()
        # Test with an abnormal number of arguments
argv = ["", self.tmpfile, "list_parts"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_list_parts_no_matching(self):
self.create_sample_ring()
# Test No matching devices
argv = ["", self.tmpfile, "list_parts",
"--ip", "unknown"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_unknown(self):
self.create_sample_ring()
argv = ["", self.tmpfile, "unknown"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_default(self):
self.create_sample_ring()
argv = ["", self.tmpfile]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_default_output(self):
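        # uuid4().hex is patched to None so the new builder gets no id,
        # keeping the output comparable to the id-less default stub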
with mock.patch('uuid.uuid4', return_value=mock.Mock(hex=None)):
self.create_sample_ring()
out, err = self.run_srb('')
self.assertOutputStub(out)
def test_default_output_id_assigned(self):
ring = self.create_sample_ring()
out, err = self.run_srb('')
self.assertOutputStub(out, builder_id=ring.id)
def test_ipv6_output(self):
ring = RingBuilder(8, 3, 1)
ring.add_dev({'weight': 100.0,
'region': 0,
'zone': 0,
'ip': '2001:db8:85a3::8a2e:370:7334',
'port': 6200,
'device': 'sda1',
'meta': 'some meta data',
})
ring.add_dev({'weight': 100.0,
'region': 1,
'zone': 1,
'ip': '127.0.0.1',
'port': 66201,
'device': 'sda2',
})
ring.add_dev({'weight': 10000.0,
'region': 2,
'zone': 2,
'ip': '2001:db8:85a3::8a2e:370:7336',
'port': 6202,
'device': 'sdc3',
'replication_ip': '127.0.10.127',
'replication_port': 7070,
})
ring.add_dev({'weight': 100.0,
'region': 3,
'zone': 3,
'ip': '2001:db8:85a3::8a2e:370:7337',
'port': 6203,
'device': 'sdd4',
'replication_ip': '7001:db8:85a3::8a2e:370:7337',
'replication_port': 11664,
})
ring.save(self.tmpfile)
out, err = self.run_srb('')
self.assertOutputStub(out, builder_id=ring.id)
def test_default_show_removed(self):
mock_stdout = six.StringIO()
mock_stderr = six.StringIO()
ring = self.create_sample_ring()
        # Note: it also sets the device's weight to zero.
argv = ["", self.tmpfile, "remove", "--id", "1"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
        # Set another device's weight to zero so that a genuinely removed
        # device can be distinguished from a device that merely has zero
        # weight.
argv = ["", self.tmpfile, "set_weight", "0", "--id", "3"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
argv = ["", self.tmpfile]
with mock.patch("sys.stdout", mock_stdout):
with mock.patch("sys.stderr", mock_stderr):
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
expected = "%s, build version 6, id %s\n" \
"64 partitions, 3.000000 replicas, 4 regions, 4 zones, " \
"4 devices, 100.00 balance, 0.00 dispersion\n" \
"The minimum number of hours before a partition can be " \
"reassigned is 1 (0:00:00 remaining)\n" \
"The overload factor is 0.00%% (0.000000)\n" \
"Ring file %s.ring.gz not found, probably " \
"it hasn't been written yet\n" \
"Devices: id region zone ip address:port " \
"replication ip:port name weight " \
"partitions balance flags meta\n" \
" 0 0 0 127.0.0.1:6200 " \
" 127.0.0.1:6200 sda1 100.00" \
" 0 -100.00 some meta data\n" \
" 1 1 1 127.0.0.2:6201 " \
" 127.0.0.2:6201 sda2 0.00" \
" 0 0.00 DEL \n" \
" 2 2 2 127.0.0.3:6202 " \
" 127.0.0.3:6202 sdc3 100.00" \
" 0 -100.00 \n" \
" 3 3 3 127.0.0.4:6203 " \
" 127.0.0.4:6203 sdd4 0.00" \
" 0 0.00 \n" %\
(self.tmpfile, ring.id, self.tmpfile)
self.assertEqual(expected, mock_stdout.getvalue())
def test_default_sorted_output(self):
mock_stdout = six.StringIO()
mock_stderr = six.StringIO()
# Create a sample ring and remove/add some devices.
now = time.time()
ring = self.create_sample_ring()
argv = ["", self.tmpfile, "add",
"--region", "1", "--zone", "2",
"--ip", "127.0.0.5", "--port", "6004",
"--replication-ip", "127.0.0.5",
"--replication-port", "6004",
"--device", "sda5", "--weight", "100.0"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
argv = ["", self.tmpfile, "remove", "--id", "0"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
argv = ["", self.tmpfile, "remove", "--id", "3"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
argv = ["", self.tmpfile, "rebalance"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
argv = \
["", self.tmpfile, "add",
"--region", "2", "--zone", "1",
"--ip", "127.0.0.6", "--port", "6005",
"--replication-ip", "127.0.0.6",
"--replication-port", "6005",
"--device", "sdb6", "--weight", "100.0"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
        # Check the order of the devices listed in the output.
argv = ["", self.tmpfile]
with mock.patch("sys.stdout", mock_stdout), mock.patch(
"sys.stderr", mock_stderr), mock.patch(
'swift.common.ring.builder.time', return_value=now):
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
self.assertOutputStub(mock_stdout.getvalue(), builder_id=ring.id)
def test_default_ringfile_check(self):
self.create_sample_ring()
# ring file not created
mock_stdout = six.StringIO()
mock_stderr = six.StringIO()
argv = ["", self.tmpfile]
with mock.patch("sys.stdout", mock_stdout):
with mock.patch("sys.stderr", mock_stderr):
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring_not_found_re = re.compile(r"Ring file .*\.ring\.gz not found")
self.assertTrue(ring_not_found_re.findall(mock_stdout.getvalue()))
# write ring file
argv = ["", self.tmpfile, "rebalance"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# ring file is up-to-date
mock_stdout = six.StringIO()
argv = ["", self.tmpfile]
with mock.patch("sys.stdout", mock_stdout):
with mock.patch("sys.stderr", mock_stderr):
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring_up_to_date_re = re.compile(
r"Ring file .*\.ring\.gz is up-to-date"
)
self.assertTrue(ring_up_to_date_re.findall(mock_stdout.getvalue()))
# change builder (set weight)
argv = ["", self.tmpfile, "set_weight", "0", "--id", "3"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# ring file is obsolete after set_weight
mock_stdout = six.StringIO()
argv = ["", self.tmpfile]
with mock.patch("sys.stdout", mock_stdout):
with mock.patch("sys.stderr", mock_stderr):
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring_obsolete_re = re.compile(r"Ring file .*\.ring\.gz is obsolete")
self.assertTrue(ring_obsolete_re.findall(mock_stdout.getvalue()))
# write ring file
argv = ["", self.tmpfile, "write_ring"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# ring file up-to-date again
mock_stdout = six.StringIO()
argv = ["", self.tmpfile]
with mock.patch("sys.stdout", mock_stdout):
with mock.patch("sys.stderr", mock_stderr):
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
self.assertTrue(ring_up_to_date_re.findall(mock_stdout.getvalue()))
# Break ring file e.g. just make it empty
open('%s.ring.gz' % self.tmpfile, 'w').close()
# ring file is invalid
mock_stdout = six.StringIO()
argv = ["", self.tmpfile]
with mock.patch("sys.stdout", mock_stdout):
with mock.patch("sys.stderr", mock_stderr):
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring_invalid_re = re.compile(r"Ring file .*\.ring\.gz is invalid")
self.assertTrue(ring_invalid_re.findall(mock_stdout.getvalue()))
def test_default_no_device_ring_without_exception(self):
self.create_sample_ring()
# remove devices from ring file
mock_stdout = six.StringIO()
mock_stderr = six.StringIO()
for device in ["d0", "d1", "d2", "d3"]:
argv = ["", self.tmpfile, "remove", device]
with mock.patch("sys.stdout", mock_stdout):
with mock.patch("sys.stderr", mock_stderr):
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
        # the 'default' command should complete without raising an exception
mock_stdout = six.StringIO()
mock_stderr = six.StringIO()
argv = ["", self.tmpfile, "default"]
with mock.patch("sys.stdout", mock_stdout):
with mock.patch("sys.stderr", mock_stderr):
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
deleted_dev_list = (
" 0 0 0 127.0.0.1:6200 127.0.0.1:6200 "
"sda1 0.00 0 0.00 DEL some meta data\n"
" 1 1 1 127.0.0.2:6201 127.0.0.2:6201 "
"sda2 0.00 0 0.00 DEL \n"
" 2 2 2 127.0.0.3:6202 127.0.0.3:6202 "
"sdc3 0.00 0 0.00 DEL \n"
" 3 3 3 127.0.0.4:6203 127.0.0.4:6203 "
"sdd4 0.00 0 0.00 DEL \n")
output = mock_stdout.getvalue()
self.assertIn("64 partitions", output)
self.assertIn("all devices have been deleted", output)
self.assertIn(deleted_dev_list, output)
def test_empty_ring(self):
self.create_sample_ring(empty=True)
        # the 'default' command should complete without raising an exception
mock_stdout = six.StringIO()
mock_stderr = six.StringIO()
argv = ["", self.tmpfile, "default"]
with mock.patch("sys.stdout", mock_stdout):
with mock.patch("sys.stderr", mock_stderr):
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
output = mock_stdout.getvalue()
self.assertIn("64 partitions", output)
self.assertIn("There are no devices in this ring", output)
def test_pretend_min_part_hours_passed(self):
self.run_srb("create", 8, 3, 1)
argv_pretend = ["", self.tmpfile, "pretend_min_part_hours_passed"]
        # pretend_min_part_hours_passed should succeed, even without a rebalance
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv_pretend)
self.run_srb("add",
"r1z1-10.1.1.1:2345/sda", 100.0,
"r1z1-10.1.1.1:2345/sdb", 100.0,
"r1z1-10.1.1.1:2345/sdc", 100.0)
argv_rebalance = ["", self.tmpfile, "rebalance"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv_rebalance)
self.run_srb("add", "r1z1-10.1.1.1:2345/sdd", 100.0)
        # rebalance only warns without pretend_min_part_hours_passed
self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv_rebalance)
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv_pretend)
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv_rebalance)
def test_rebalance(self):
self.create_sample_ring()
argv = ["", self.tmpfile, "rebalance", "3"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertTrue(ring.validate())
def test_rebalance_no_device_change(self):
self.create_sample_ring()
ring = RingBuilder.load(self.tmpfile)
ring.rebalance()
ring.save(self.tmpfile)
# Test No change to the device
argv = ["", self.tmpfile, "rebalance", "3"]
        with mock.patch('swift.common.ring.RingBuilder.save') as mock_save:
            self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv)
        self.assertEqual(len(mock_save.mock_calls), 0)
def test_rebalance_saves_dispersion_improvement(self):
# We set up a situation where dispersion improves but balance
# doesn't. We construct a ring with one zone, then add a second zone
# concurrently with a new device in the first zone. That first
# device won't acquire any partitions, so the ring's balance won't
# change. However, dispersion will improve.
ring = RingBuilder(6, 6, 1)
devs = ('d%s' % i for i in itertools.count())
for i in range(6):
ring.add_dev({
'region': 1, 'zone': 1,
'ip': '10.0.0.1', 'port': 20001, 'weight': 1000,
'device': next(devs)})
ring.rebalance()
# The last guy in zone 1
ring.add_dev({
'region': 1, 'zone': 1,
'ip': '10.0.0.1', 'port': 20001, 'weight': 1000,
'device': next(devs)})
# Add zone 2 (same total weight as zone 1)
for i in range(7):
ring.add_dev({
'region': 1, 'zone': 2,
'ip': '10.0.0.2', 'port': 20001, 'weight': 1000,
'device': next(devs)})
ring.pretend_min_part_hours_passed()
ring.save(self.tmpfile)
del ring
# Rebalance once: this gets 1/6th replica into zone 2; the ring is
# saved because devices changed.
argv = ["", self.tmpfile, "rebalance", "5759339"]
self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv)
rb = RingBuilder.load(self.tmpfile)
self.assertEqual(rb.dispersion, 33.333333333333336)
self.assertEqual(rb.get_balance(), 100)
self.run_srb('pretend_min_part_hours_passed')
# Rebalance again: this gets 2/6th replica into zone 2, but no devices
# changed and the balance stays the same. The only improvement is
# dispersion.
captured = {}
def capture_save(rb, path):
captured['dispersion'] = rb.dispersion
captured['balance'] = rb.get_balance()
# The warning is benign; it's just telling the user to keep on
# rebalancing. The important assertion is that the builder was
# saved.
with mock.patch('swift.common.ring.RingBuilder.save', capture_save):
self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv)
self.assertEqual(captured, {
'dispersion': 16.666666666666668,
'balance': 100,
})
def test_rebalance_no_devices(self):
# Test no devices
argv = ["", self.tmpfile, "create", "6", "3.14159265359", "1"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
argv = ["", self.tmpfile, "rebalance"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_rebalance_remove_zero_weighted_device(self):
self.create_sample_ring()
ring = RingBuilder.load(self.tmpfile)
ring.set_dev_weight(3, 0.0)
ring.rebalance()
ring.pretend_min_part_hours_passed()
ring.remove_dev(3)
ring.save(self.tmpfile)
# Test rebalance after remove 0 weighted device
argv = ["", self.tmpfile, "rebalance", "3"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertTrue(ring.validate())
self.assertIsNone(ring.devs[3])
def test_rebalance_resets_time_remaining(self):
self.create_sample_ring()
ring = RingBuilder.load(self.tmpfile)
time_path = 'swift.common.ring.builder.time'
argv = ["", self.tmpfile, "rebalance", "3"]
time = 0
# first rebalance, should have 1 hour left before next rebalance
time += 3600
with mock.patch(time_path, return_value=time):
self.assertEqual(ring.min_part_seconds_left, 0)
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.min_part_seconds_left, 3600)
# min part hours passed, change ring and save for rebalance
ring.set_dev_weight(0, ring.devs[0]['weight'] * 2)
ring.save(self.tmpfile)
# second rebalance, should have 1 hour left
time += 3600
with mock.patch(time_path, return_value=time):
self.assertEqual(ring.min_part_seconds_left, 0)
self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.min_part_seconds_left, 3600)
def test_time_remaining(self):
self.create_sample_ring()
now = time.time()
with mock.patch('swift.common.ring.builder.time', return_value=now):
self.run_srb('rebalance')
out, err = self.run_srb('rebalance')
self.assertIn('No partitions could be reassigned', out)
self.assertIn('must be at least min_part_hours', out)
self.assertIn('1 hours (1:00:00 remaining)', out)
the_future = now + 3600
with mock.patch('swift.common.ring.builder.time',
return_value=the_future):
out, err = self.run_srb('rebalance')
self.assertIn('No partitions could be reassigned', out)
self.assertIn('There is no need to do so at this time', out)
# or you can pretend_min_part_hours_passed
self.run_srb('pretend_min_part_hours_passed')
out, err = self.run_srb('rebalance')
self.assertIn('No partitions could be reassigned', out)
self.assertIn('There is no need to do so at this time', out)
def test_rebalance_failure_does_not_reset_last_moves_epoch(self):
ring = RingBuilder(8, 3, 1)
ring.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 6010, 'device': 'sda1'})
ring.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 6020, 'device': 'sdb1'})
ring.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 6030, 'device': 'sdc1'})
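        # drive the builder's clock explicitly so the min_part_hours
        # bookkeeping checked below is deterministic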
time_path = 'swift.common.ring.builder.time'
argv = ["", self.tmpfile, "rebalance", "3"]
with mock.patch(time_path, return_value=0):
ring.rebalance()
ring.save(self.tmpfile)
# min part hours not passed
with mock.patch(time_path, return_value=(3600 * 0.6)):
self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.min_part_seconds_left, 3600 * 0.4)
ring.save(self.tmpfile)
# min part hours passed, no partitions need to be moved
with mock.patch(time_path, return_value=(3600 * 1.5)):
self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.min_part_seconds_left, 0)
def test_rebalance_with_seed(self):
self.create_sample_ring()
# Test rebalance using explicit seed parameter
argv = ["", self.tmpfile, "rebalance", "--seed", "2"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_rebalance_removed_devices(self):
self.create_sample_ring()
argvs = [
["", self.tmpfile, "rebalance", "3"],
["", self.tmpfile, "remove", "d0"],
["", self.tmpfile, "rebalance", "3"]]
for argv in argvs:
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_rebalance_min_part_hours_not_passed(self):
self.create_sample_ring()
argvs = [
["", self.tmpfile, "rebalance", "3"],
["", self.tmpfile, "set_weight", "d0", "1000"]]
for argv in argvs:
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
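        # snapshot the partition assignment so we can verify below that the
        # blocked rebalance did not move anything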
last_replica2part2dev = ring._replica2part2dev
mock_stdout = six.StringIO()
argv = ["", self.tmpfile, "rebalance", "3"]
with mock.patch("sys.stdout", mock_stdout):
self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv)
expected = "No partitions could be reassigned.\n" + \
"The time between rebalances must be " + \
"at least min_part_hours: 1 hours"
self.assertTrue(expected in mock_stdout.getvalue())
        # Messages can be faked, so make sure the partition assignment
        # did not change at all, despite the warning
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(last_replica2part2dev, ring._replica2part2dev)
def test_rebalance_part_power_increase(self):
self.create_sample_ring()
ring = RingBuilder.load(self.tmpfile)
ring.next_part_power = 1
ring.save(self.tmpfile)
argv = ["", self.tmpfile, "rebalance", "3"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_write_ring(self):
self.create_sample_ring()
argv = ["", self.tmpfile, "rebalance"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
argv = ["", self.tmpfile, "write_ring"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_write_empty_ring(self):
ring = RingBuilder(6, 3, 1)
ring.save(self.tmpfile)
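        # 2 == EXIT_ERROR: a ring with no devices cannot be written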
exp_results = {'valid_exit_codes': [2]}
out, err = self.run_srb("write_ring", exp_results=exp_results)
self.assertEqual('Unable to write empty ring.\n', out)
def test_write_builder(self):
# Test builder file already exists
self.create_sample_ring()
argv = ["", self.tmpfile, "rebalance"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
argv = ["", self.tmpfile, "write_builder"]
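        # expect EXIT_ERROR (2): the builder file already exists and must not
        # be overwritten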
exp_results = {'valid_exit_codes': [2]}
self.run_srb(*argv, exp_results=exp_results)
def test_write_builder_fractional_replicas(self):
# Test builder file already exists
self.create_sample_ring(replicas=1.2)
argv = ["", self.tmpfile, "rebalance"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring_file = os.path.join(os.path.dirname(self.tmpfile),
os.path.basename(self.tmpfile) + ".ring.gz")
os.remove(self.tmpfile) # loses file...
argv = ["", ring_file, "write_builder", "24"]
self.assertIsNone(ringbuilder.main(argv))
# Note that we've picked up an extension
builder = RingBuilder.load(self.tmpfile + '.builder')
# Version was recorded in the .ring.gz!
self.assertEqual(builder.version, 5)
        # Note that this is different from the original! But it more closely
# reflects the reality that we have an extra replica for 12 of 64 parts
self.assertEqual(builder.replicas, 1.1875)
def test_write_builder_no_version(self):
self.create_sample_ring()
rb = RingBuilder.load(self.tmpfile)
rb.rebalance()
# Make sure we write down the ring in the old way, with no version
rd = rb.get_ring()
rd.version = None
rd.save(self.tmpfile + ".ring.gz")
ring_file = os.path.join(os.path.dirname(self.tmpfile),
os.path.basename(self.tmpfile) + ".ring.gz")
os.remove(self.tmpfile) # loses file...
argv = ["", ring_file, "write_builder", "24"]
self.assertIsNone(ringbuilder.main(argv))
# Note that we've picked up an extension
builder = RingBuilder.load(self.tmpfile + '.builder')
# No version in the .ring.gz; default to 0
self.assertEqual(builder.version, 0)
def test_write_builder_after_device_removal(self):
# Test regenerating builder file after having removed a device
# and lost the builder file
self.create_sample_ring()
argv = ["", self.tmpfile, "add", "r1z1-127.0.0.1:6200/sdb", "1.0"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
argv = ["", self.tmpfile, "add", "r1z1-127.0.0.1:6200/sdc", "1.0"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
argv = ["", self.tmpfile, "rebalance"]
self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv)
argv = ["", self.tmpfile, "remove", "--id", "0"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
argv = ["", self.tmpfile, "rebalance"]
self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv)
backup_file = os.path.join(os.path.dirname(self.tmpfile),
os.path.basename(self.tmpfile) + ".ring.gz")
os.remove(self.tmpfile) # loses file...
argv = ["", backup_file, "write_builder", "24"]
self.assertIsNone(ringbuilder.main(argv))
rb = RingBuilder.load(self.tmpfile + '.builder')
self.assertIsNotNone(rb._last_part_moves)
rb._last_part_moves = None
rb.save(self.tmpfile)
argv = ["", self.tmpfile + '.builder', "rebalance"]
self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv)
def test_warn_at_risk(self):
# check that warning is generated when rebalance does not achieve
# satisfactory balance
self.create_sample_ring()
orig_rebalance = RingBuilder.rebalance
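        # fake a post-rebalance balance just above what the CLI considers
        # satisfactory (5), so a warning is expected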
fake_balance = 6
def fake_rebalance(builder_instance, *args, **kwargs):
parts, balance, removed_devs = orig_rebalance(builder_instance)
return parts, fake_balance, removed_devs
argv = ["", self.tmpfile, "rebalance"]
with mock.patch("swift.common.ring.builder.RingBuilder.rebalance",
fake_rebalance):
self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv)
# even when some overload is allowed
self.create_sample_ring(overload=0.05)
argv = ["", self.tmpfile, "rebalance"]
with mock.patch("swift.common.ring.builder.RingBuilder.rebalance",
fake_rebalance):
self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv)
def test_no_warn_when_balanced(self):
# check that no warning is generated when satisfactory balance is
# achieved...
self.create_sample_ring()
orig_rebalance = RingBuilder.rebalance
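        # a balance of 5 is still considered satisfactory, so no warning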
fake_balance = 5
def fake_rebalance(builder_instance, *args, **kwargs):
parts, balance, removed_devs = orig_rebalance(builder_instance)
return parts, fake_balance, removed_devs
argv = ["", self.tmpfile, "rebalance"]
with mock.patch("swift.common.ring.builder.RingBuilder.rebalance",
fake_rebalance):
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
# ...or balance is within permitted overload
self.create_sample_ring(overload=0.06)
fake_balance = 6
argv = ["", self.tmpfile, "rebalance"]
with mock.patch("swift.common.ring.builder.RingBuilder.rebalance",
fake_rebalance):
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_invalid_device_name(self):
self.create_sample_ring()
for device_name in ["", " ", " sda1", "sda1 ", " meta "]:
argv = ["",
self.tmpfile,
"add",
"r1z1-127.0.0.1:6200/%s" % device_name,
"1"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
argv = ["",
self.tmpfile,
"add",
"--region", "1",
"--zone", "1",
"--ip", "127.0.0.1",
"--port", "6200",
"--device", device_name,
"--weight", "100"]
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
def test_dispersion_command(self):
self.create_sample_ring()
self.run_srb('rebalance')
out, err = self.run_srb('dispersion -v')
self.assertIn('dispersion', out.lower())
self.assertFalse(err)
def test_dispersion_command_recalculate(self):
rb = RingBuilder(8, 3, 0)
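        # three devices, one per zone; the loop index is shifted to 1-based
        # so zones and IP suffixes run 1-3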
for i in range(3):
i += 1
rb.add_dev({'region': 1, 'zone': i, 'weight': 1.0,
'ip': '127.0.0.%d' % i, 'port': 6000, 'device': 'sda'})
# extra device in z1
rb.add_dev({'region': 1, 'zone': 1, 'weight': 1.0,
'ip': '127.0.0.1', 'port': 6000, 'device': 'sdb'})
rb.rebalance()
self.assertEqual(rb.dispersion, 16.666666666666668)
# simulate an out-of-date dispersion calculation
rb.dispersion = 50
rb.save(self.tempfile)
old_version = rb.version
out, err = self.run_srb('dispersion')
self.assertIn('Dispersion is 50.000000', out)
out, err = self.run_srb('dispersion --recalculate')
self.assertIn('Dispersion is 16.666667', out)
rb = RingBuilder.load(self.tempfile)
self.assertEqual(rb.version, old_version + 1)
def test_use_ringfile_as_builderfile(self):
mock_stdout = six.StringIO()
mock_stderr = six.StringIO()
        self.create_sample_ring()
        argv = ["", self.tmpfile, "rebalance", "3"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
argv = ["", "%s.ring.gz" % self.tmpfile]
with mock.patch("sys.stdout", mock_stdout):
with mock.patch("sys.stderr", mock_stderr):
self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv)
expected = "Note: using %s.builder instead of %s.ring.gz " \
"as builder file\n" \
"Ring Builder file does not exist: %s.builder\n" % (
self.tmpfile, self.tmpfile, self.tmpfile)
self.assertEqual(expected, mock_stdout.getvalue())
def test_main_no_arguments(self):
# Test calling main with no arguments
argv = []
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_main_single_argument(self):
# Test calling main with single argument
argv = [""]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_main_with_safe(self):
# Test calling main with '-safe' argument
self.create_sample_ring()
argv = ["-safe", self.tmpfile]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_remove_all_devices(self):
# Would block without the 'yes' argument
self.create_sample_ring()
argv = ["", self.tmpfile, "remove", "--weight", "100", "--yes"]
with Timeout(5):
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_set_info_all_devices(self):
# Would block without the 'yes' argument
self.create_sample_ring()
argv = ["", self.tmpfile, "set_info", "--weight", "100",
"--change-meta", "something", "--yes"]
with Timeout(5):
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
def test_set_weight_all_devices(self):
# Would block without the 'yes' argument
self.create_sample_ring()
argv = ["", self.tmpfile, "set_weight",
"--weight", "100", "200", "--yes"]
with Timeout(5):
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
class TestRebalanceCommand(unittest.TestCase, RunSwiftRingBuilderMixin):
def __init__(self, *args, **kwargs):
super(TestRebalanceCommand, self).__init__(*args, **kwargs)
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
tmpf = tempfile.NamedTemporaryFile(dir=self.tmpdir)
self.tempfile = self.tmpfile = tmpf.name
def tearDown(self):
try:
shutil.rmtree(self.tmpdir, True)
except OSError:
pass
def run_srb(self, *argv):
mock_stdout = six.StringIO()
mock_stderr = six.StringIO()
srb_args = ["", self.tempfile] + [str(s) for s in argv]
try:
with mock.patch("sys.stdout", mock_stdout):
with mock.patch("sys.stderr", mock_stderr):
ringbuilder.main(srb_args)
except SystemExit as err:
if err.code not in (0, 1): # (success, warning)
raise
return (mock_stdout.getvalue(), mock_stderr.getvalue())
def test_debug(self):
# NB: getLogger(name) always returns the same object
rb_logger = logging.getLogger("swift.ring.builder")
try:
self.assertNotEqual(rb_logger.getEffectiveLevel(), logging.DEBUG)
self.run_srb("create", 8, 3, 1)
self.run_srb("add",
"r1z1-10.1.1.1:2345/sda", 100.0,
"r1z1-10.1.1.1:2345/sdb", 100.0,
"r1z1-10.1.1.1:2345/sdc", 100.0,
"r1z1-10.1.1.1:2345/sdd", 100.0)
self.run_srb("rebalance", "--debug")
self.assertEqual(rb_logger.getEffectiveLevel(), logging.DEBUG)
rb_logger.setLevel(logging.INFO)
self.run_srb("rebalance", "--debug", "123")
self.assertEqual(rb_logger.getEffectiveLevel(), logging.DEBUG)
rb_logger.setLevel(logging.INFO)
self.run_srb("rebalance", "123", "--debug")
self.assertEqual(rb_logger.getEffectiveLevel(), logging.DEBUG)
finally:
rb_logger.setLevel(logging.INFO) # silence other test cases
def test_rebalance_warning_appears(self):
self.run_srb("create", 8, 3, 24)
# all in one machine: totally balanceable
self.run_srb("add",
"r1z1-10.1.1.1:2345/sda", 100.0,
"r1z1-10.1.1.1:2345/sdb", 100.0,
"r1z1-10.1.1.1:2345/sdc", 100.0,
"r1z1-10.1.1.1:2345/sdd", 100.0)
out, err = self.run_srb("rebalance")
self.assertNotIn("rebalance/repush", out)
# 2 machines of equal size: balanceable, but not in one pass due to
# min_part_hours > 0
self.run_srb("add",
"r1z1-10.1.1.2:2345/sda", 100.0,
"r1z1-10.1.1.2:2345/sdb", 100.0,
"r1z1-10.1.1.2:2345/sdc", 100.0,
"r1z1-10.1.1.2:2345/sdd", 100.0)
self.run_srb("pretend_min_part_hours_passed")
out, err = self.run_srb("rebalance")
self.assertIn("rebalance/repush", out)
# after two passes, it's all balanced out
self.run_srb("pretend_min_part_hours_passed")
out, err = self.run_srb("rebalance")
self.assertNotIn("rebalance/repush", out)
def test_rebalance_warning_with_overload(self):
self.run_srb("create", 8, 3, 24)
self.run_srb("set_overload", 0.12)
# The ring's balance is at least 5, so normally we'd get a warning,
# but it's suppressed due to the overload factor.
self.run_srb("add",
"r1z1-10.1.1.1:2345/sda", 100.0,
"r1z1-10.1.1.1:2345/sdb", 100.0,
"r1z1-10.1.1.1:2345/sdc", 120.0)
out, err = self.run_srb("rebalance")
self.assertNotIn("rebalance/repush", out)
# Now we add in a really big device, but not enough partitions move
# to fill it in one pass, so we see the rebalance warning.
self.run_srb("add", "r1z1-10.1.1.1:2345/sdd", 99999.0)
self.run_srb("pretend_min_part_hours_passed")
out, err = self.run_srb("rebalance")
self.assertIn("rebalance/repush", out)
def test_cached_dispersion_value(self):
self.run_srb("create", 8, 3, 24)
self.run_srb("add",
"r1z1-10.1.1.1:2345/sda", 100.0,
"r1z1-10.1.1.1:2345/sdb", 100.0,
"r1z1-10.1.1.1:2345/sdc", 100.0,
"r1z1-10.1.1.1:2345/sdd", 100.0)
self.run_srb('rebalance')
out, err = self.run_srb() # list devices
self.assertIn('dispersion', out)
# remove cached dispersion value
builder = RingBuilder.load(self.tempfile)
builder.dispersion = None
builder.save(self.tempfile)
# now dispersion output is suppressed
out, err = self.run_srb() # list devices
self.assertNotIn('dispersion', out)
# but will show up after rebalance
self.run_srb('rebalance', '-f')
out, err = self.run_srb() # list devices
self.assertIn('dispersion', out)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/cli/test_ringbuilder.py |
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for swift.cli.info"""
from argparse import Namespace
import os
import unittest
import mock
from shutil import rmtree
from tempfile import mkdtemp
import six
from six.moves import cStringIO as StringIO
from test.unit import patch_policies, write_fake_ring, skip_if_no_xattrs
from swift.common import ring, utils
from swift.common.swob import Request
from swift.common.storage_policy import StoragePolicy, POLICIES
from swift.cli.info import (print_db_info_metadata, print_ring_locations,
print_info, print_obj_metadata, print_obj,
InfoSystemExit, print_item_locations,
parse_get_node_args)
from swift.account.server import AccountController
from swift.container.server import ContainerController
from swift.container.backend import UNSHARDED, SHARDED
from swift.obj.diskfile import write_metadata
@patch_policies([StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False),
StoragePolicy(3, 'three', False)])
class TestCliInfoBase(unittest.TestCase):
def setUp(self):
skip_if_no_xattrs()
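        # pin the hash path prefix/suffix so the account/container hashes in
        # the expected output below stay stable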
self.orig_hp = utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX
utils.HASH_PATH_PREFIX = b'info'
utils.HASH_PATH_SUFFIX = b'info'
self.testdir = os.path.join(mkdtemp(), 'tmp_test_cli_info')
utils.mkdirs(self.testdir)
rmtree(self.testdir)
utils.mkdirs(os.path.join(self.testdir, 'sda1'))
utils.mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
utils.mkdirs(os.path.join(self.testdir, 'sdb1'))
utils.mkdirs(os.path.join(self.testdir, 'sdb1', 'tmp'))
self.account_ring_path = os.path.join(self.testdir, 'account.ring.gz')
account_devs = [
{'ip': '127.0.0.1', 'port': 42},
{'ip': '127.0.0.2', 'port': 43},
]
write_fake_ring(self.account_ring_path, *account_devs)
self.container_ring_path = os.path.join(self.testdir,
'container.ring.gz')
container_devs = [
{'ip': '127.0.0.3', 'port': 42},
{'ip': '127.0.0.4', 'port': 43},
]
write_fake_ring(self.container_ring_path, *container_devs)
self.object_ring_path = os.path.join(self.testdir, 'object.ring.gz')
object_devs = [
{'ip': '127.0.0.3', 'port': 42},
{'ip': '127.0.0.4', 'port': 43},
]
write_fake_ring(self.object_ring_path, *object_devs)
# another ring for policy 1
self.one_ring_path = os.path.join(self.testdir, 'object-1.ring.gz')
write_fake_ring(self.one_ring_path, *object_devs)
# ... and another for policy 2
self.two_ring_path = os.path.join(self.testdir, 'object-2.ring.gz')
write_fake_ring(self.two_ring_path, *object_devs)
# ... and one for policy 3 with some v6 IPs in it
object_devs_ipv6 = [
{'ip': 'feed:face::dead:beef', 'port': 42},
{'ip': 'deca:fc0f:feeb:ad11::1', 'port': 43}
]
self.three_ring_path = os.path.join(self.testdir, 'object-3.ring.gz')
write_fake_ring(self.three_ring_path, *object_devs_ipv6)
def tearDown(self):
utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX = self.orig_hp
rmtree(os.path.dirname(self.testdir))
def assertRaisesMessage(self, exc, msg, func, *args, **kwargs):
with self.assertRaises(exc) as ctx:
func(*args, **kwargs)
self.assertIn(msg, str(ctx.exception))
class TestCliInfo(TestCliInfoBase):
def test_print_db_info_metadata(self):
self.assertRaisesMessage(ValueError, 'Wrong DB type',
print_db_info_metadata, 't', {}, {})
self.assertRaisesMessage(ValueError, 'DB info is None',
print_db_info_metadata, 'container', None, {})
self.assertRaisesMessage(ValueError, 'Info is incomplete',
print_db_info_metadata, 'container', {}, {})
info = {
'account': 'acct',
'is_deleted': False,
'created_at': 100.1,
'put_timestamp': 106.3,
'delete_timestamp': 107.9,
'status_changed_at': 108.3,
'container_count': '3',
'object_count': '20',
'bytes_used': '42',
'hash': 'abaddeadbeefcafe',
'id': 'abadf100d0ddba11',
}
md = {'x-account-meta-mydata': ('swift', '0000000000.00000'),
'x-other-something': ('boo', '0000000000.00000')}
out = StringIO()
with mock.patch('sys.stdout', out):
print_db_info_metadata('account', info, md)
exp_out = '''Path: /acct
Account: acct
Deleted: False
Account Hash: dc5be2aa4347a22a0fee6bc7de505b47
Metadata:
Created at: 1970-01-01T00:01:40.100000 (100.1)
Put Timestamp: 1970-01-01T00:01:46.300000 (106.3)
Delete Timestamp: 1970-01-01T00:01:47.900000 (107.9)
Status Timestamp: 1970-01-01T00:01:48.300000 (108.3)
Container Count: 3
Object Count: 20
Bytes Used: 42
Chexor: abaddeadbeefcafe
UUID: abadf100d0ddba11
X-Other-Something: boo
No system metadata found in db file
User Metadata:
x-account-meta-mydata: swift'''
self.assertEqual(out.getvalue().strip().split('\n'),
exp_out.split('\n'))
info = dict(
account='acct',
container='cont',
storage_policy_index=0,
created_at='0000000100.10000',
put_timestamp='0000000106.30000',
delete_timestamp='0000000107.90000',
status_changed_at='0000000108.30000',
object_count='20',
bytes_used='42',
reported_put_timestamp='0000010106.30000',
reported_delete_timestamp='0000010107.90000',
reported_object_count='20',
reported_bytes_used='42',
x_container_foo='bar',
x_container_bar='goo',
db_state=UNSHARDED,
is_root=True,
is_deleted=False,
hash='abaddeadbeefcafe',
id='abadf100d0ddba11')
md = {'x-container-sysmeta-mydata': ('swift', '0000000000.00000')}
out = StringIO()
with mock.patch('sys.stdout', out):
print_db_info_metadata('container', info, md, True)
exp_out = '''Path: /acct/cont
Account: acct
Container: cont
Deleted: False
Container Hash: d49d0ecbb53be1fcc49624f2f7c7ccae
Metadata:
Created at: 1970-01-01T00:01:40.100000 (0000000100.10000)
Put Timestamp: 1970-01-01T00:01:46.300000 (0000000106.30000)
Delete Timestamp: 1970-01-01T00:01:47.900000 (0000000107.90000)
Status Timestamp: 1970-01-01T00:01:48.300000 (0000000108.30000)
Object Count: 20
Bytes Used: 42
Storage Policy: %s (0)
Reported Put Timestamp: 1970-01-01T02:48:26.300000 (0000010106.30000)
Reported Delete Timestamp: 1970-01-01T02:48:27.900000 (0000010107.90000)
Reported Object Count: 20
Reported Bytes Used: 42
Chexor: abaddeadbeefcafe
UUID: abadf100d0ddba11
X-Container-Bar: goo
X-Container-Foo: bar
System Metadata:
mydata: swift
No user metadata found in db file
Sharding Metadata:
Type: root
State: unsharded''' % POLICIES[0].name
self.assertEqual(sorted(out.getvalue().strip().split('\n')),
sorted(exp_out.split('\n')))
info = {
'account': 'acct',
'is_deleted': True,
'created_at': 100.1,
'put_timestamp': 106.3,
'delete_timestamp': 107.9,
'status_changed_at': 108.3,
'container_count': '3',
'object_count': '20',
'bytes_used': '42',
'hash': 'abaddeadbeefcafe',
'id': 'abadf100d0ddba11',
}
md = {'x-account-meta-mydata': ('swift', '0000000000.00000'),
'x-other-something': ('boo', '0000000000.00000')}
out = StringIO()
with mock.patch('sys.stdout', out):
print_db_info_metadata('account', info, md)
exp_out = '''Path: /acct
Account: acct
Deleted: True
Account Hash: dc5be2aa4347a22a0fee6bc7de505b47
Metadata:
Created at: 1970-01-01T00:01:40.100000 (100.1)
Put Timestamp: 1970-01-01T00:01:46.300000 (106.3)
Delete Timestamp: 1970-01-01T00:01:47.900000 (107.9)
Status Timestamp: 1970-01-01T00:01:48.300000 (108.3)
Container Count: 3
Object Count: 20
Bytes Used: 42
Chexor: abaddeadbeefcafe
UUID: abadf100d0ddba11
X-Other-Something: boo
No system metadata found in db file
User Metadata:
x-account-meta-mydata: swift'''
self.assertEqual(sorted(out.getvalue().strip().split('\n')),
sorted(exp_out.split('\n')))
def test_print_db_info_metadata_with_shard_ranges(self):
shard_ranges = [utils.ShardRange(
name='.sharded_a/shard_range_%s' % i,
timestamp=utils.Timestamp(i), lower='%da' % i,
upper='%dz' % i, object_count=i, bytes_used=i,
meta_timestamp=utils.Timestamp(i)) for i in range(1, 4)]
shard_ranges[0].state = utils.ShardRange.CLEAVED
shard_ranges[1].state = utils.ShardRange.CREATED
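# the third range keeps the default FOUND state, so the expected output counts one range in each state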
info = dict(
account='acct',
container='cont',
storage_policy_index=0,
created_at='0000000100.10000',
put_timestamp='0000000106.30000',
delete_timestamp='0000000107.90000',
status_changed_at='0000000108.30000',
object_count='20',
bytes_used='42',
reported_put_timestamp='0000010106.30000',
reported_delete_timestamp='0000010107.90000',
reported_object_count='20',
reported_bytes_used='42',
db_state=SHARDED,
is_root=True,
shard_ranges=shard_ranges,
is_deleted=False,
hash='abaddeadbeefcafe',
id='abadf100d0ddba11')
out = StringIO()
with mock.patch('sys.stdout', out):
print_db_info_metadata('container', info, {}, verbose=True)
exp_out = '''Path: /acct/cont
Account: acct
Container: cont
Deleted: False
Container Hash: d49d0ecbb53be1fcc49624f2f7c7ccae
Metadata:
Created at: 1970-01-01T00:01:40.100000 (0000000100.10000)
Put Timestamp: 1970-01-01T00:01:46.300000 (0000000106.30000)
Delete Timestamp: 1970-01-01T00:01:47.900000 (0000000107.90000)
Status Timestamp: 1970-01-01T00:01:48.300000 (0000000108.30000)
Object Count: 20
Bytes Used: 42
Storage Policy: %s (0)
Reported Put Timestamp: 1970-01-01T02:48:26.300000 (0000010106.30000)
Reported Delete Timestamp: 1970-01-01T02:48:27.900000 (0000010107.90000)
Reported Object Count: 20
Reported Bytes Used: 42
Chexor: abaddeadbeefcafe
UUID: abadf100d0ddba11
No system metadata found in db file
No user metadata found in db file
Sharding Metadata:
Type: root
State: sharded
Shard Ranges (3):
States:
found: 1
created: 1
cleaved: 1
Name: .sharded_a/shard_range_1
lower: '1a', upper: '1z'
Object Count: 1, Bytes Used: 1, State: cleaved (30)
Created at: 1970-01-01T00:00:01.000000 (0000000001.00000)
Meta Timestamp: 1970-01-01T00:00:01.000000 (0000000001.00000)
Name: .sharded_a/shard_range_2
lower: '2a', upper: '2z'
Object Count: 2, Bytes Used: 2, State: created (20)
Created at: 1970-01-01T00:00:02.000000 (0000000002.00000)
Meta Timestamp: 1970-01-01T00:00:02.000000 (0000000002.00000)
Name: .sharded_a/shard_range_3
lower: '3a', upper: '3z'
Object Count: 3, Bytes Used: 3, State: found (10)
Created at: 1970-01-01T00:00:03.000000 (0000000003.00000)
Meta Timestamp: 1970-01-01T00:00:03.000000 (0000000003.00000)''' %\
POLICIES[0].name
self.assertEqual(out.getvalue().strip().split('\n'),
exp_out.strip().split('\n'))
def test_print_db_info_metadata_with_many_shard_ranges(self):
shard_ranges = [utils.ShardRange(
name='.sharded_a/shard_range_%s' % i,
timestamp=utils.Timestamp(i), lower='%02da' % i,
upper='%02dz' % i, object_count=i, bytes_used=i,
meta_timestamp=utils.Timestamp(i)) for i in range(1, 20)]
shard_ranges[0].state = utils.ShardRange.CLEAVED
shard_ranges[1].state = utils.ShardRange.CREATED
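# only two states are changed; the remaining 17 ranges stay FOUND, and without verbose output only the per-state summary is printed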
info = dict(
account='acct',
container='cont',
storage_policy_index=0,
created_at='0000000100.10000',
put_timestamp='0000000106.30000',
delete_timestamp='0000000107.90000',
status_changed_at='0000000108.30000',
object_count='20',
bytes_used='42',
reported_put_timestamp='0000010106.30000',
reported_delete_timestamp='0000010107.90000',
reported_object_count='20',
reported_bytes_used='42',
db_state=SHARDED,
is_root=True,
shard_ranges=shard_ranges,
is_deleted=False,
hash='abaddeadbeefcafe',
id='abadf100d0ddba11')
out = StringIO()
with mock.patch('sys.stdout', out):
print_db_info_metadata('container', info, {})
exp_out = '''
Path: /acct/cont
Account: acct
Container: cont
Deleted: False
Container Hash: d49d0ecbb53be1fcc49624f2f7c7ccae
Metadata:
Created at: 1970-01-01T00:01:40.100000 (0000000100.10000)
Put Timestamp: 1970-01-01T00:01:46.300000 (0000000106.30000)
Delete Timestamp: 1970-01-01T00:01:47.900000 (0000000107.90000)
Status Timestamp: 1970-01-01T00:01:48.300000 (0000000108.30000)
Object Count: 20
Bytes Used: 42
Storage Policy: %s (0)
Reported Put Timestamp: 1970-01-01T02:48:26.300000 (0000010106.30000)
Reported Delete Timestamp: 1970-01-01T02:48:27.900000 (0000010107.90000)
Reported Object Count: 20
Reported Bytes Used: 42
Chexor: abaddeadbeefcafe
UUID: abadf100d0ddba11
No system metadata found in db file
No user metadata found in db file
Sharding Metadata:
Type: root
State: sharded
Shard Ranges (19):
States:
found: 17
created: 1
cleaved: 1
(Use -v/--verbose to show more Shard Ranges details)
''' %\
POLICIES[0].name
self.assertEqual(out.getvalue().strip().split('\n'),
exp_out.strip().split('\n'))
def test_print_db_info_metadata_with_shard_ranges_bis(self):
shard_ranges = [utils.ShardRange(
name='.sharded_a/shard_range_%s' % i,
timestamp=utils.Timestamp(i), lower=u'%d\u30a2' % i,
upper=u'%d\u30e4' % i, object_count=i, bytes_used=i,
meta_timestamp=utils.Timestamp(i)) for i in range(1, 4)]
shard_ranges[0].state = utils.ShardRange.CLEAVED
shard_ranges[1].state = utils.ShardRange.CREATED
info = dict(
account='acct',
container='cont',
storage_policy_index=0,
created_at='0000000100.10000',
put_timestamp='0000000106.30000',
delete_timestamp='0000000107.90000',
status_changed_at='0000000108.30000',
object_count='20',
bytes_used='42',
reported_put_timestamp='0000010106.30000',
reported_delete_timestamp='0000010107.90000',
reported_object_count='20',
reported_bytes_used='42',
db_state=SHARDED,
is_root=True,
shard_ranges=shard_ranges)
info['hash'] = 'abaddeadbeefcafe'
info['id'] = 'abadf100d0ddba11'
info['is_deleted'] = False
out = StringIO()
with mock.patch('sys.stdout', out):
print_db_info_metadata('container', info, {}, verbose=True)
if six.PY2:
s_a = '\\xe3\\x82\\xa2'
s_ya = '\\xe3\\x83\\xa4'
else:
s_a = '\u30a2'
s_ya = '\u30e4'
exp_out = '''Path: /acct/cont
Account: acct
Container: cont
Deleted: False
Container Hash: d49d0ecbb53be1fcc49624f2f7c7ccae
Metadata:
Created at: 1970-01-01T00:01:40.100000 (0000000100.10000)
Put Timestamp: 1970-01-01T00:01:46.300000 (0000000106.30000)
Delete Timestamp: 1970-01-01T00:01:47.900000 (0000000107.90000)
Status Timestamp: 1970-01-01T00:01:48.300000 (0000000108.30000)
Object Count: 20
Bytes Used: 42
Storage Policy: %s (0)
Reported Put Timestamp: 1970-01-01T02:48:26.300000 (0000010106.30000)
Reported Delete Timestamp: 1970-01-01T02:48:27.900000 (0000010107.90000)
Reported Object Count: 20
Reported Bytes Used: 42
Chexor: abaddeadbeefcafe
UUID: abadf100d0ddba11
No system metadata found in db file
No user metadata found in db file
Sharding Metadata:
Type: root
State: sharded
Shard Ranges (3):
States:
found: 1
created: 1
cleaved: 1
Name: .sharded_a/shard_range_1
lower: '1%s', upper: '1%s'
Object Count: 1, Bytes Used: 1, State: cleaved (30)
Created at: 1970-01-01T00:00:01.000000 (0000000001.00000)
Meta Timestamp: 1970-01-01T00:00:01.000000 (0000000001.00000)
Name: .sharded_a/shard_range_2
lower: '2%s', upper: '2%s'
Object Count: 2, Bytes Used: 2, State: created (20)
Created at: 1970-01-01T00:00:02.000000 (0000000002.00000)
Meta Timestamp: 1970-01-01T00:00:02.000000 (0000000002.00000)
Name: .sharded_a/shard_range_3
lower: '3%s', upper: '3%s'
Object Count: 3, Bytes Used: 3, State: found (10)
Created at: 1970-01-01T00:00:03.000000 (0000000003.00000)
Meta Timestamp: 1970-01-01T00:00:03.000000 (0000000003.00000)''' %\
(POLICIES[0].name, s_a, s_ya, s_a, s_ya, s_a, s_ya)
self.assertEqual(out.getvalue().strip().split('\n'),
exp_out.strip().split('\n'))
def test_print_ring_locations_invalid_args(self):
self.assertRaises(ValueError, print_ring_locations,
None, 'dir', 'acct')
self.assertRaises(ValueError, print_ring_locations,
[], None, 'acct')
self.assertRaises(ValueError, print_ring_locations,
[], 'dir', None)
self.assertRaises(ValueError, print_ring_locations,
[], 'dir', 'acct', 'con')
self.assertRaises(ValueError, print_ring_locations,
[], 'dir', 'acct', obj='o')
def test_print_ring_locations_account(self):
out = StringIO()
with mock.patch('sys.stdout', out):
acctring = ring.Ring(self.testdir, ring_name='account')
print_ring_locations(acctring, 'dir', 'acct')
exp_db = os.path.join('${DEVICE:-/srv/node*}', 'sdb1', 'dir', '3',
'b47', 'dc5be2aa4347a22a0fee6bc7de505b47')
self.assertIn(exp_db, out.getvalue())
self.assertIn('127.0.0.1', out.getvalue())
self.assertIn('127.0.0.2', out.getvalue())
def test_print_ring_locations_container(self):
out = StringIO()
with mock.patch('sys.stdout', out):
contring = ring.Ring(self.testdir, ring_name='container')
print_ring_locations(contring, 'dir', 'acct', 'con')
exp_db = os.path.join('${DEVICE:-/srv/node*}', 'sdb1', 'dir', '1',
'fe6', '63e70955d78dfc62821edc07d6ec1fe6')
self.assertIn(exp_db, out.getvalue())
def test_print_ring_locations_obj(self):
out = StringIO()
with mock.patch('sys.stdout', out):
objring = ring.Ring(self.testdir, ring_name='object')
print_ring_locations(objring, 'dir', 'acct', 'con', 'obj')
exp_obj = os.path.join('${DEVICE:-/srv/node*}', 'sda1', 'dir', '1',
'117', '4a16154fc15c75e26ba6afadf5b1c117')
self.assertIn(exp_obj, out.getvalue())
def test_print_ring_locations_partition_number(self):
out = StringIO()
with mock.patch('sys.stdout', out):
objring = ring.Ring(self.testdir, ring_name='object')
print_ring_locations(objring, 'objects', None, tpart='1')
exp_obj1 = os.path.join('${DEVICE:-/srv/node*}', 'sda1',
'objects', '1')
exp_obj2 = os.path.join('${DEVICE:-/srv/node*}', 'sdb1',
'objects', '1')
self.assertIn(exp_obj1, out.getvalue())
self.assertIn(exp_obj2, out.getvalue())
def test_print_item_locations_invalid_args(self):
# No target specified
self.assertRaises(InfoSystemExit, print_item_locations,
None)
# Need a ring or policy
self.assertRaises(InfoSystemExit, print_item_locations,
None, account='account', obj='object')
# No account specified
self.assertRaises(InfoSystemExit, print_item_locations,
None, container='con')
# No policy named 'xyz' (unrecognized policy)
self.assertRaises(InfoSystemExit, print_item_locations,
None, obj='object', policy_name='xyz')
# No container specified
objring = ring.Ring(self.testdir, ring_name='object')
self.assertRaises(InfoSystemExit, print_item_locations,
objring, account='account', obj='object')
def test_print_item_locations_ring_policy_mismatch_no_target(self):
out = StringIO()
with mock.patch('sys.stdout', out):
objring = ring.Ring(self.testdir, ring_name='object')
# Test mismatch of ring and policy name (valid policy)
self.assertRaises(InfoSystemExit, print_item_locations,
objring, policy_name='zero')
self.assertIn('Warning: mismatch between ring and policy name!',
out.getvalue())
self.assertIn('No target specified', out.getvalue())
def test_print_item_locations_invalid_policy_no_target(self):
out = StringIO()
policy_name = 'nineteen'
with mock.patch('sys.stdout', out):
objring = ring.Ring(self.testdir, ring_name='object')
self.assertRaises(InfoSystemExit, print_item_locations,
objring, policy_name=policy_name)
exp_msg = 'Warning: Policy %s is not valid' % policy_name
self.assertIn(exp_msg, out.getvalue())
self.assertIn('No target specified', out.getvalue())
def test_print_item_locations_policy_object(self):
out = StringIO()
part = '1'
with mock.patch('sys.stdout', out):
print_item_locations(None, partition=part, policy_name='zero',
swift_dir=self.testdir)
exp_part_msg = 'Partition\t%s' % part
self.assertIn(exp_part_msg, out.getvalue())
self.assertNotIn('Account', out.getvalue())
self.assertNotIn('Container', out.getvalue())
self.assertNotIn('Object', out.getvalue())
def test_print_item_locations_dashed_ring_name_partition(self):
out = StringIO()
part = '1'
with mock.patch('sys.stdout', out):
print_item_locations(None, policy_name='one',
ring_name='foo-bar', partition=part,
swift_dir=self.testdir)
exp_part_msg = 'Partition\t%s' % part
self.assertIn(exp_part_msg, out.getvalue())
self.assertNotIn('Account', out.getvalue())
self.assertNotIn('Container', out.getvalue())
self.assertNotIn('Object', out.getvalue())
def test_print_item_locations_account_with_ring(self):
out = StringIO()
account = 'account'
with mock.patch('sys.stdout', out):
account_ring = ring.Ring(self.testdir, ring_name=account)
print_item_locations(account_ring, account=account)
exp_msg = 'Account \t%s' % account
self.assertIn(exp_msg, out.getvalue())
exp_warning = 'Warning: account specified ' + \
'but ring not named "account"'
self.assertIn(exp_warning, out.getvalue())
exp_acct_msg = 'Account \t%s' % account
self.assertIn(exp_acct_msg, out.getvalue())
self.assertNotIn('Container', out.getvalue())
self.assertNotIn('Object', out.getvalue())
def test_print_item_locations_account_no_ring(self):
out = StringIO()
account = 'account'
with mock.patch('sys.stdout', out):
print_item_locations(None, account=account,
swift_dir=self.testdir)
exp_acct_msg = 'Account \t%s' % account
self.assertIn(exp_acct_msg, out.getvalue())
self.assertNotIn('Container', out.getvalue())
self.assertNotIn('Object', out.getvalue())
def test_print_item_locations_account_container_ring(self):
out = StringIO()
account = 'account'
container = 'container'
with mock.patch('sys.stdout', out):
container_ring = ring.Ring(self.testdir, ring_name='container')
print_item_locations(container_ring, account=account,
container=container)
exp_acct_msg = 'Account \t%s' % account
exp_cont_msg = 'Container\t%s' % container
self.assertIn(exp_acct_msg, out.getvalue())
self.assertIn(exp_cont_msg, out.getvalue())
self.assertNotIn('Object', out.getvalue())
def test_print_item_locations_account_container_no_ring(self):
out = StringIO()
account = 'account'
container = 'container'
with mock.patch('sys.stdout', out):
print_item_locations(None, account=account,
container=container, swift_dir=self.testdir)
exp_acct_msg = 'Account \t%s' % account
exp_cont_msg = 'Container\t%s' % container
self.assertIn(exp_acct_msg, out.getvalue())
self.assertIn(exp_cont_msg, out.getvalue())
self.assertNotIn('Object', out.getvalue())
def test_print_item_locations_account_container_object_ring(self):
out = StringIO()
account = 'account'
container = 'container'
obj = 'object'
with mock.patch('sys.stdout', out):
object_ring = ring.Ring(self.testdir, ring_name='object')
print_item_locations(object_ring, ring_name='object',
account=account, container=container,
obj=obj)
exp_acct_msg = 'Account \t%s' % account
exp_cont_msg = 'Container\t%s' % container
exp_obj_msg = 'Object \t%s' % obj
self.assertIn(exp_acct_msg, out.getvalue())
self.assertIn(exp_cont_msg, out.getvalue())
self.assertIn(exp_obj_msg, out.getvalue())
def test_print_item_locations_account_container_object_dashed_ring(self):
out = StringIO()
account = 'account'
container = 'container'
obj = 'object'
with mock.patch('sys.stdout', out):
object_ring = ring.Ring(self.testdir, ring_name='object-1')
print_item_locations(object_ring, ring_name='object-1',
account=account, container=container,
obj=obj)
exp_acct_msg = 'Account \t%s' % account
exp_cont_msg = 'Container\t%s' % container
exp_obj_msg = 'Object \t%s' % obj
self.assertIn(exp_acct_msg, out.getvalue())
self.assertIn(exp_cont_msg, out.getvalue())
self.assertIn(exp_obj_msg, out.getvalue())
def test_print_info(self):
db_file = 'foo'
self.assertRaises(InfoSystemExit, print_info, 'object', db_file)
db_file = os.path.join(self.testdir, './acct.db')
self.assertRaises(InfoSystemExit, print_info, 'account', db_file)
controller = AccountController(
{'devices': self.testdir, 'mount_check': 'false'})
req = Request.blank('/sda1/1/acct', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(controller)
self.assertEqual(resp.status_int, 201)
out = StringIO()
exp_raised = False
with mock.patch('sys.stdout', out):
db_file = os.path.join(self.testdir, 'sda1', 'accounts',
'1', 'b47',
'dc5be2aa4347a22a0fee6bc7de505b47',
'dc5be2aa4347a22a0fee6bc7de505b47.db')
print_info('account', db_file, swift_dir=self.testdir)
self.assertGreater(len(out.getvalue().strip()), 800)
controller = ContainerController(
{'devices': self.testdir, 'mount_check': 'false'})
req = Request.blank('/sda1/1/acct/cont',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(controller)
self.assertEqual(resp.status_int, 201)
out = StringIO()
exp_raised = False
with mock.patch('sys.stdout', out):
db_file = os.path.join(self.testdir, 'sda1', 'containers',
'1', 'cae',
'd49d0ecbb53be1fcc49624f2f7c7ccae',
'd49d0ecbb53be1fcc49624f2f7c7ccae.db')
orig_cwd = os.getcwd()
try:
os.chdir(os.path.dirname(db_file))
print_info('container', os.path.basename(db_file),
swift_dir='/dev/null')
except Exception:
exp_raised = True
finally:
os.chdir(orig_cwd)
if exp_raised:
self.fail("Unexpected exception raised")
else:
self.assertGreater(len(out.getvalue().strip()), 600)
out = StringIO()
exp_raised = False
with mock.patch('sys.stdout', out):
db_file = os.path.join(self.testdir, 'sda1', 'containers',
'1', 'cae',
'd49d0ecbb53be1fcc49624f2f7c7ccae',
'd49d0ecbb53be1fcc49624f2f7c7ccae.db')
orig_cwd = os.getcwd()
try:
os.chdir(os.path.dirname(db_file))
print_info('account', os.path.basename(db_file),
swift_dir='/dev/null')
except InfoSystemExit:
exp_raised = True
finally:
os.chdir(orig_cwd)
if exp_raised:
exp_out = 'Does not appear to be a DB of type "account":' \
' ./d49d0ecbb53be1fcc49624f2f7c7ccae.db'
self.assertEqual(out.getvalue().strip(), exp_out)
else:
self.fail("Expected an InfoSystemExit exception to be raised")
def test_parse_get_node_args(self):
# Capture error messages
# (without any parameters)
options = Namespace(policy_name=None, partition=None, quoted=None)
args = ''
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# a
options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'a'
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# a c
options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'a c'
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# a c o
options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'a c o'
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# a/c
options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'a/c'
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# a/c/o
options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'a/c/o'
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# account container junk/test.ring.gz
options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'account container junk/test.ring.gz'
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# account container object junk/test.ring.gz
options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'account container object junk/test.ring.gz'
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# object.ring.gz(without any arguments i.e. a c o)
options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'object.ring.gz'
self.assertRaisesMessage(InfoSystemExit,
'Ring file does not exist',
parse_get_node_args, options, args.split())
# Valid policy
# -P zero
options = Namespace(policy_name='zero', partition=None, quoted=None)
args = ''
self.assertRaisesMessage(InfoSystemExit,
'No target specified',
parse_get_node_args, options, args.split())
# -P one a/c/o
options = Namespace(policy_name='one', partition=None, quoted=None)
args = 'a/c/o'
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertEqual(args, ['a', 'c', 'o'])
# -P one account container photos/cat.jpg
options = Namespace(policy_name='one', partition=None, quoted=None)
args = 'account container photos/cat.jpg'
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertEqual(args, ['account', 'container', 'photos/cat.jpg'])
# -P one account/container/photos/cat.jpg
options = Namespace(policy_name='one', partition=None, quoted=None)
args = 'account/container/photos/cat.jpg'
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertEqual(args, ['account', 'container', 'photos/cat.jpg'])
# -P one account/container/junk/test.ring.gz(object endswith 'ring.gz')
options = Namespace(policy_name='one', partition=None, quoted=None)
args = 'account/container/junk/test.ring.gz'
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertEqual(args, ['account', 'container', 'junk/test.ring.gz'])
# -P two a c o hooya
options = Namespace(policy_name='two', partition=None, quoted=None)
args = 'a c o hooya'
self.assertRaisesMessage(InfoSystemExit,
'Invalid arguments',
parse_get_node_args, options, args.split())
# -P zero -p 1
options = Namespace(policy_name='zero', partition='1', quoted=None)
args = ''
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertFalse(args)
# -P one -p 1 a/c/o
options = Namespace(policy_name='one', partition='1', quoted=None)
args = 'a/c/o'
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertEqual(args, ['a', 'c', 'o'])
# -P two -p 1 a c o hooya
options = Namespace(policy_name='two', partition='1', quoted=None)
args = 'a c o hooya'
self.assertRaisesMessage(InfoSystemExit,
'Invalid arguments',
parse_get_node_args, options, args.split())
# Invalid policy
# -P undefined
options = Namespace(policy_name='undefined')
args = ''
self.assertRaisesMessage(InfoSystemExit,
"No policy named 'undefined'",
parse_get_node_args, options, args.split())
# -P undefined -p 1
options = Namespace(policy_name='undefined', partition='1')
args = ''
self.assertRaisesMessage(InfoSystemExit,
"No policy named 'undefined'",
parse_get_node_args, options, args.split())
# -P undefined a
options = Namespace(policy_name='undefined')
args = 'a'
self.assertRaisesMessage(InfoSystemExit,
"No policy named 'undefined'",
parse_get_node_args, options, args.split())
# -P undefined a c
options = Namespace(policy_name='undefined')
args = 'a c'
self.assertRaisesMessage(InfoSystemExit,
"No policy named 'undefined'",
parse_get_node_args, options, args.split())
# -P undefined a c o
options = Namespace(policy_name='undefined')
args = 'a c o'
self.assertRaisesMessage(InfoSystemExit,
"No policy named 'undefined'",
parse_get_node_args, options, args.split())
# -P undefined a/c
options = Namespace(policy_name='undefined')
args = 'a/c'
# ring_path, args = parse_get_node_args(options, args.split())
self.assertRaisesMessage(InfoSystemExit,
"No policy named 'undefined'",
parse_get_node_args, options, args)
# -P undefined a/c/o
options = Namespace(policy_name='undefined')
args = 'a/c/o'
# ring_path, args = parse_get_node_args(options, args.split())
self.assertRaisesMessage(InfoSystemExit,
"No policy named 'undefined'",
parse_get_node_args, options, args)
# Mock tests
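# os.path.exists is mocked to return True so the ring paths below are treated as existing files without touching disk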
# /etc/swift/object.ring.gz(without any arguments i.e. a c o)
options = Namespace(policy_name=None, partition=None, quoted=None)
args = '/etc/swift/object.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
self.assertRaisesMessage(
InfoSystemExit,
'No target specified',
parse_get_node_args, options, args.split())
# Similar ring_path and arguments
# /etc/swift/object.ring.gz /etc/swift/object.ring.gz
options = Namespace(policy_name=None, partition=None, quoted=None)
args = '/etc/swift/object.ring.gz /etc/swift/object.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, '/etc/swift/object.ring.gz')
self.assertEqual(args, ['etc', 'swift', 'object.ring.gz'])
# /etc/swift/object.ring.gz a/c/etc/swift/object.ring.gz
options = Namespace(policy_name=None, partition=None, quoted=None)
args = '/etc/swift/object.ring.gz a/c/etc/swift/object.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, '/etc/swift/object.ring.gz')
self.assertEqual(args, ['a', 'c', 'etc/swift/object.ring.gz'])
# Invalid path as mentioned in BUG#1539275
# /etc/swift/object.tar.gz account container object
options = Namespace(policy_name=None, partition=None, quoted=None)
args = '/etc/swift/object.tar.gz account container object'
self.assertRaisesMessage(
InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# object.ring.gz a/
options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a/'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a'])
# object.ring.gz a/c
options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a/c'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c'])
# object.ring.gz a/c/o
options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a/c/o'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o'])
# object.ring.gz a/c/o/junk/test.ring.gz
options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a/c/o/junk/test.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o/junk/test.ring.gz'])
# object.ring.gz a
options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a'])
# object.ring.gz a c
options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a c'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c'])
# object.ring.gz a c o
options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a c o'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o'])
# object.ring.gz a c o blah blah
options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a c o blah blah'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
self.assertRaisesMessage(
InfoSystemExit,
'Invalid arguments',
parse_get_node_args, options, args.split())
# object.ring.gz a/c/o/blah/blah
options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a/c/o/blah/blah'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o/blah/blah'])
# object.ring.gz -p 1
options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertFalse(args)
# object.ring.gz -p 1 a c o
options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz a c o'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o'])
# object.ring.gz -p 1 a c o fourth_arg
options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz a c o fourth_arg'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
self.assertRaisesMessage(
InfoSystemExit,
'Invalid arguments',
parse_get_node_args, options, args.split())
# object.ring.gz -p 1 a/c/o
options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz a/c/o'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o'])
# object.ring.gz -p 1 a/c/junk/test.ring.gz
options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz a/c/junk/test.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'junk/test.ring.gz'])
# object.ring.gz -p 1 a/c/photos/cat.jpg
options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz a/c/photos/cat.jpg'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'photos/cat.jpg'])
# --all object.ring.gz a
options = Namespace(all=True, policy_name=None, quoted=None)
args = 'object.ring.gz a'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a'])
# --all object.ring.gz a c
options = Namespace(all=True, policy_name=None, quoted=None)
args = 'object.ring.gz a c'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c'])
# --all object.ring.gz a c o
options = Namespace(all=True, policy_name=None, quoted=None)
args = 'object.ring.gz a c o'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o'])
# object.ring.gz account container photos/cat.jpg
options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'object.ring.gz account container photos/cat.jpg'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['account', 'container', 'photos/cat.jpg'])
# object.ring.gz account/container/photos/cat.jpg
options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'object.ring.gz account/container/photos/cat.jpg'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['account', 'container', 'photos/cat.jpg'])
# Object name ends with 'ring.gz'
# object.ring.gz account/container/junk/test.ring.gz
options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'object.ring.gz account/container/junk/test.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args.split())
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['account', 'container', 'junk/test.ring.gz'])
# Object name has special characters
# object.ring.gz /account/container/obj\nwith%0anewline
options = Namespace(policy_name=None, partition=None, quoted=None)
args = ['object.ring.gz', 'account/container/obj\nwith%0anewline']
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args)
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['account', 'container', 'obj\nwith%0anewline'])
options = Namespace(policy_name=None, partition=None, quoted=True)
args = ['object.ring.gz', 'account/container/obj\nwith%0anewline']
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args)
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['account', 'container', 'obj\nwith\nnewline'])
options = Namespace(policy_name=None, partition=None, quoted=False)
args = ['object.ring.gz', 'account/container/obj\nwith%0anewline']
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
ring_path, args = parse_get_node_args(options, args)
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['account', 'container', 'obj\nwith%0anewline'])
class TestPrintObj(TestCliInfoBase):
def setUp(self):
super(TestPrintObj, self).setUp()
self.datafile = os.path.join(self.testdir,
'1402017432.46642.data')
with open(self.datafile, 'wb') as fp:
md = {'name': '/AUTH_admin/c/obj',
'Content-Type': 'application/octet-stream'}
write_metadata(fp, md)
def test_print_obj_invalid(self):
datafile = '1402017324.68634.data'
self.assertRaises(InfoSystemExit, print_obj, datafile)
datafile = os.path.join(self.testdir, './1234.data')
self.assertRaises(InfoSystemExit, print_obj, datafile)
with open(datafile, 'wb') as fp:
fp.write(b'1234')
out = StringIO()
with mock.patch('sys.stdout', out):
self.assertRaises(InfoSystemExit, print_obj, datafile)
self.assertEqual(out.getvalue().strip(),
'Invalid metadata')
def test_print_obj_valid(self):
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj(self.datafile, swift_dir=self.testdir)
etag_msg = 'ETag: Not found in metadata'
length_msg = 'Content-Length: Not found in metadata'
self.assertIn(etag_msg, out.getvalue())
self.assertIn(length_msg, out.getvalue())
def test_print_obj_with_policy(self):
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj(self.datafile, swift_dir=self.testdir, policy_name='one')
etag_msg = 'ETag: Not found in metadata'
length_msg = 'Content-Length: Not found in metadata'
ring_loc_msg = 'ls -lah'
self.assertIn(etag_msg, out.getvalue())
self.assertIn(length_msg, out.getvalue())
self.assertIn(ring_loc_msg, out.getvalue())
def test_missing_etag(self):
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj(self.datafile)
self.assertIn('ETag: Not found in metadata', out.getvalue())
class TestPrintObjFullMeta(TestCliInfoBase):
def setUp(self):
super(TestPrintObjFullMeta, self).setUp()
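# Lay the data file out under an objects-1 hash directory so the storage policy index (1) can be derived from its path.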
self.datafile = os.path.join(self.testdir,
'sda', 'objects-1',
'1', 'ea8',
'db4449e025aca992307c7c804a67eea8',
'1402017884.18202.data')
utils.mkdirs(os.path.dirname(self.datafile))
with open(self.datafile, 'wb') as fp:
md = {'name': '/AUTH_admin/c/obj',
'Content-Type': 'application/octet-stream',
'ETag': 'd41d8cd98f00b204e9800998ecf8427e',
'Content-Length': '0'}
write_metadata(fp, md)
def test_print_obj(self):
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj(self.datafile, swift_dir=self.testdir)
self.assertIn('/objects-1/', out.getvalue())
def test_print_obj_policy_index(self):
# Check an output of policy index when current directory is in
# object-* directory
out = StringIO()
hash_dir = os.path.dirname(self.datafile)
file_name = os.path.basename(self.datafile)
# Change working directory to object hash dir
cwd = os.getcwd()
try:
os.chdir(hash_dir)
with mock.patch('sys.stdout', out):
print_obj(file_name, swift_dir=self.testdir)
finally:
os.chdir(cwd)
self.assertIn('X-Backend-Storage-Policy-Index: 1', out.getvalue())
def test_print_obj_curl_command_ipv4(self):
# Note: policy 2 has IPv4 addresses in its ring
datafile2 = os.path.join(
self.testdir,
'sda', 'objects-2', '1', 'ea8',
'db4449e025aca992307c7c804a67eea8', '1402017884.18202.data')
utils.mkdirs(os.path.dirname(datafile2))
with open(datafile2, 'wb') as fp:
md = {'name': '/AUTH_admin/c/obj',
'Content-Type': 'application/octet-stream',
'ETag': 'd41d8cd98f00b204e9800998ecf8427e',
'Content-Length': '0'}
write_metadata(fp, md)
object_ring = ring.Ring(self.testdir, ring_name='object-2')
part, nodes = object_ring.get_nodes('AUTH_admin', 'c', 'obj')
node = nodes[0]
out = StringIO()
hash_dir = os.path.dirname(datafile2)
file_name = os.path.basename(datafile2)
# Change working directory to object hash dir
cwd = os.getcwd()
try:
os.chdir(hash_dir)
with mock.patch('sys.stdout', out):
print_obj(file_name, swift_dir=self.testdir)
finally:
os.chdir(cwd)
exp_curl = (
'curl -g -I -XHEAD '
'"http://{host}:{port}/{device}/{part}/AUTH_admin/c/obj" '
'-H "X-Backend-Storage-Policy-Index: 2" --path-as-is').format(
host=node['ip'],
port=node['port'],
device=node['device'],
part=part)
self.assertIn(exp_curl, out.getvalue())
def test_print_obj_curl_command_ipv6(self):
# Note: policy 3 has IPv6 addresses in its ring
datafile3 = os.path.join(
self.testdir,
'sda', 'objects-3', '1', 'ea8',
'db4449e025aca992307c7c804a67eea8', '1402017884.18202.data')
utils.mkdirs(os.path.dirname(datafile3))
with open(datafile3, 'wb') as fp:
md = {'name': '/AUTH_admin/c/obj',
'Content-Type': 'application/octet-stream',
'ETag': 'd41d8cd98f00b204e9800998ecf8427e',
'Content-Length': '0'}
write_metadata(fp, md)
object_ring = ring.Ring(self.testdir, ring_name='object-3')
part, nodes = object_ring.get_nodes('AUTH_admin', 'c', 'obj')
node = nodes[0]
out = StringIO()
hash_dir = os.path.dirname(datafile3)
file_name = os.path.basename(datafile3)
# Change working directory to object hash dir
cwd = os.getcwd()
try:
os.chdir(hash_dir)
with mock.patch('sys.stdout', out):
print_obj(file_name, swift_dir=self.testdir)
finally:
os.chdir(cwd)
exp_curl = (
'curl -g -I -XHEAD '
'"http://[{host}]:{port}'
'/{device}/{part}/AUTH_admin/c/obj" '
'-H "X-Backend-Storage-Policy-Index: 3" --path-as-is').format(
host=node['ip'],
port=node['port'],
device=node['device'],
part=part)
self.assertIn(exp_curl, out.getvalue())
def test_print_obj_meta_and_ts_files(self):
# verify that print_obj will also read from meta and ts files
base = os.path.splitext(self.datafile)[0]
for ext in ('.meta', '.ts'):
test_file = '%s%s' % (base, ext)
os.link(self.datafile, test_file)
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj(test_file, swift_dir=self.testdir)
self.assertIn('/objects-1/', out.getvalue())
def test_print_obj_no_ring(self):
no_rings_dir = os.path.join(self.testdir, 'no_rings_here')
os.mkdir(no_rings_dir)
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj(self.datafile, swift_dir=no_rings_dir)
self.assertIn('d41d8cd98f00b204e9800998ecf8427e', out.getvalue())
self.assertNotIn('Partition', out.getvalue())
def test_print_obj_policy_name_mismatch(self):
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj(self.datafile, policy_name='two', swift_dir=self.testdir)
ring_alert_msg = 'Warning: Ring does not match policy!'
self.assertIn(ring_alert_msg, out.getvalue())
def test_valid_etag(self):
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj(self.datafile)
self.assertIn('ETag: d41d8cd98f00b204e9800998ecf8427e (valid)',
out.getvalue())
def test_invalid_etag(self):
with open(self.datafile, 'wb') as fp:
md = {'name': '/AUTH_admin/c/obj',
'Content-Type': 'application/octet-stream',
'ETag': 'badetag',
'Content-Length': '0'}
write_metadata(fp, md)
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj(self.datafile)
self.assertIn('ETag: badetag doesn\'t match file hash',
out.getvalue())
def test_unchecked_etag(self):
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj(self.datafile, check_etag=False)
self.assertIn('ETag: d41d8cd98f00b204e9800998ecf8427e (not checked)',
out.getvalue())
def test_print_obj_metadata(self):
self.assertRaisesMessage(ValueError, 'Metadata is None',
print_obj_metadata, [])
def get_metadata(items):
md = {
'name': '/AUTH_admin/c/dummy',
'Content-Type': 'application/octet-stream',
'X-Timestamp': 106.3,
}
md.update(items)
return md
metadata = get_metadata({'X-Object-Meta-Mtime': '107.3'})
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj_metadata(metadata)
exp_out = '''Path: /AUTH_admin/c/dummy
Account: AUTH_admin
Container: c
Object: dummy
Object hash: 128fdf98bddd1b1e8695f4340e67a67a
Content-Type: application/octet-stream
Timestamp: 1970-01-01T00:01:46.300000 (%s)
System Metadata:
No metadata found
Transient System Metadata:
No metadata found
User Metadata:
X-Object-Meta-Mtime: 107.3
Other Metadata:
No metadata found''' % (
utils.Timestamp(106.3).internal)
self.assertEqual(out.getvalue().strip(), exp_out)
metadata = get_metadata({
'X-Object-Sysmeta-Mtime': '107.3',
'X-Object-Sysmeta-Name': 'Obj name',
})
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj_metadata(metadata, True)
exp_out = '''Path: /AUTH_admin/c/dummy
Account: AUTH_admin
Container: c
Object: dummy
Object hash: 128fdf98bddd1b1e8695f4340e67a67a
Content-Type: application/octet-stream
Timestamp: 1970-01-01T00:01:46.300000 (%s)
System Metadata:
Mtime: 107.3
Name: Obj name
Transient System Metadata:
No metadata found
User Metadata:
No metadata found
Other Metadata:
No metadata found''' % (
utils.Timestamp(106.3).internal)
self.assertEqual(out.getvalue().strip(), exp_out)
metadata = get_metadata({
'X-Object-Meta-Mtime': '107.3',
'X-Object-Sysmeta-Mtime': '107.3',
'X-Object-Mtime': '107.3',
})
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj_metadata(metadata)
exp_out = '''Path: /AUTH_admin/c/dummy
Account: AUTH_admin
Container: c
Object: dummy
Object hash: 128fdf98bddd1b1e8695f4340e67a67a
Content-Type: application/octet-stream
Timestamp: 1970-01-01T00:01:46.300000 (%s)
System Metadata:
X-Object-Sysmeta-Mtime: 107.3
Transient System Metadata:
No metadata found
User Metadata:
X-Object-Meta-Mtime: 107.3
Other Metadata:
X-Object-Mtime: 107.3''' % (
utils.Timestamp(106.3).internal)
self.assertEqual(out.getvalue().strip(), exp_out)
metadata = get_metadata({})
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj_metadata(metadata)
exp_out = '''Path: /AUTH_admin/c/dummy
Account: AUTH_admin
Container: c
Object: dummy
Object hash: 128fdf98bddd1b1e8695f4340e67a67a
Content-Type: application/octet-stream
Timestamp: 1970-01-01T00:01:46.300000 (%s)
System Metadata:
No metadata found
Transient System Metadata:
No metadata found
User Metadata:
No metadata found
Other Metadata:
No metadata found''' % (
utils.Timestamp(106.3).internal)
self.assertEqual(out.getvalue().strip(), exp_out)
metadata = get_metadata({'X-Object-Meta-Mtime': '107.3'})
metadata['name'] = '/a-s'
self.assertRaisesMessage(ValueError, 'Path is invalid',
print_obj_metadata, metadata)
metadata = get_metadata({'X-Object-Meta-Mtime': '107.3'})
del metadata['name']
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj_metadata(metadata, True)
exp_out = '''Path: Not found in metadata
Content-Type: application/octet-stream
Timestamp: 1970-01-01T00:01:46.300000 (%s)
System Metadata:
No metadata found
Transient System Metadata:
No metadata found
User Metadata:
Mtime: 107.3
Other Metadata:
No metadata found''' % (
utils.Timestamp(106.3).internal)
self.assertEqual(out.getvalue().strip(), exp_out)
metadata = get_metadata({'X-Object-Meta-Mtime': '107.3'})
del metadata['Content-Type']
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj_metadata(metadata)
exp_out = '''Path: /AUTH_admin/c/dummy
Account: AUTH_admin
Container: c
Object: dummy
Object hash: 128fdf98bddd1b1e8695f4340e67a67a
Content-Type: Not found in metadata
Timestamp: 1970-01-01T00:01:46.300000 (%s)
System Metadata:
No metadata found
Transient System Metadata:
No metadata found
User Metadata:
X-Object-Meta-Mtime: 107.3
Other Metadata:
No metadata found''' % (
utils.Timestamp(106.3).internal)
self.assertEqual(out.getvalue().strip(), exp_out)
metadata = get_metadata({'X-Object-Meta-Mtime': '107.3'})
del metadata['X-Timestamp']
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj_metadata(metadata, True)
exp_out = '''Path: /AUTH_admin/c/dummy
Account: AUTH_admin
Container: c
Object: dummy
Object hash: 128fdf98bddd1b1e8695f4340e67a67a
Content-Type: application/octet-stream
Timestamp: Not found in metadata
System Metadata:
No metadata found
Transient System Metadata:
No metadata found
User Metadata:
Mtime: 107.3
Other Metadata:
No metadata found'''
self.assertEqual(out.getvalue().strip(), exp_out)
metadata = get_metadata({
'X-Object-Meta-Mtime': '107.3',
'X-Object-Sysmeta-Mtime': '106.3',
'X-Object-Transient-Sysmeta-Mtime': '105.3',
'X-Object-Mtime': '104.3',
})
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj_metadata(metadata)
exp_out = '''Path: /AUTH_admin/c/dummy
Account: AUTH_admin
Container: c
Object: dummy
Object hash: 128fdf98bddd1b1e8695f4340e67a67a
Content-Type: application/octet-stream
Timestamp: 1970-01-01T00:01:46.300000 (%s)
System Metadata:
X-Object-Sysmeta-Mtime: 106.3
Transient System Metadata:
X-Object-Transient-Sysmeta-Mtime: 105.3
User Metadata:
X-Object-Meta-Mtime: 107.3
Other Metadata:
X-Object-Mtime: 104.3''' % (
utils.Timestamp(106.3).internal)
self.assertEqual(out.getvalue().strip(), exp_out)
metadata = get_metadata({
'X-Object-Meta-Mtime': '107.3',
'X-Object-Sysmeta-Mtime': '106.3',
'X-Object-Transient-Sysmeta-Mtime': '105.3',
'X-Object-Mtime': '104.3',
})
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj_metadata(metadata, True)
exp_out = '''Path: /AUTH_admin/c/dummy
Account: AUTH_admin
Container: c
Object: dummy
Object hash: 128fdf98bddd1b1e8695f4340e67a67a
Content-Type: application/octet-stream
Timestamp: 1970-01-01T00:01:46.300000 (%s)
System Metadata:
Mtime: 106.3
Transient System Metadata:
Mtime: 105.3
User Metadata:
Mtime: 107.3
Other Metadata:
X-Object-Mtime: 104.3''' % (
utils.Timestamp(106.3).internal)
self.assertEqual(out.getvalue().strip(), exp_out)
def test_print_obj_crypto_metadata(self):
crypto_body_meta = '%7B%22body_key%22%3A+%7B%22iv%22%3A+%22HmpwLDjlo' \
'6JxFvOOCVyT6Q%3D%3D%22%2C+%22key%22%3A+%22dEox1dyZJPCs4mtmiQDg' \
'u%2Fv1RTointi%2FUhm2y%2BgB3F8%3D%22%7D%2C+%22cipher%22%3A+%22A' \
'ES_CTR_256%22%2C+%22iv%22%3A+%22l3W0NZekjt4PFkAJXubVYQ%3D%3D%2' \
'2%2C+%22key_id%22%3A+%7B%22path%22%3A+%22%2FAUTH_test%2Ftest%2' \
'Ftest%22%2C+%22secret_id%22%3A+%222018%22%2C+%22v%22%3A+%221%2' \
'2%7D%7D'
crypto_meta_meta = '%7B%22cipher%22%3A+%22AES_CTR_256%22%2C+%22key_' \
'id%22%3A+%7B%22path%22%3A+%22%2FAUTH_test%2Ftest%2Ftest%22%2C+' \
'%22secret_id%22%3A+%222018%22%2C+%22v%22%3A+%221%22%7D%7D'
stub_metadata = {
'name': '/AUTH_test/test/test',
'Content-Type': 'application/sekret',
'X-Timestamp': '1549899598.237075',
'X-Object-Sysmeta-Crypto-Body-Meta': crypto_body_meta,
'X-Object-Transient-Sysmeta-Crypto-Meta': crypto_meta_meta,
}
out = StringIO()
with mock.patch('sys.stdout', out):
print_obj_metadata(stub_metadata)
exp_out = '''Path: /AUTH_test/test/test
Account: AUTH_test
Container: test
Object: test
Object hash: dc3a7d53522b9392b0d19571a752fdfb
Content-Type: application/sekret
Timestamp: 2019-02-11T15:39:58.237080 (1549899598.23708)
System Metadata:
X-Object-Sysmeta-Crypto-Body-Meta: %s
Transient System Metadata:
X-Object-Transient-Sysmeta-Crypto-Meta: %s
User Metadata:
No metadata found
Other Metadata:
No metadata found
Data crypto details: {
"body_key": {
"iv": "HmpwLDjlo6JxFvOOCVyT6Q==",
"key": "dEox1dyZJPCs4mtmiQDgu/v1RTointi/Uhm2y+gB3F8="
},
"cipher": "AES_CTR_256",
"iv": "l3W0NZekjt4PFkAJXubVYQ==",
"key_id": {
"path": "/AUTH_test/test/test",
"secret_id": "2018",
"v": "1"
}
}
Metadata crypto details: {
"cipher": "AES_CTR_256",
"key_id": {
"path": "/AUTH_test/test/test",
"secret_id": "2018",
"v": "1"
}
}''' % (crypto_body_meta, crypto_meta_meta)
self.maxDiff = None
self.assertMultiLineEqual(out.getvalue().strip(), exp_out)
class TestPrintObjWeirdPath(TestPrintObjFullMeta):
def setUp(self):
super(TestPrintObjWeirdPath, self).setUp()
# The device name is objects-0 instead of sda; print_obj must still cope with this unusual layout.
self.datafile = os.path.join(self.testdir,
'objects-0', 'objects-1',
'1', 'ea8',
'db4449e025aca992307c7c804a67eea8',
'1402017884.18202.data')
utils.mkdirs(os.path.dirname(self.datafile))
with open(self.datafile, 'wb') as fp:
md = {'name': '/AUTH_admin/c/obj',
'Content-Type': 'application/octet-stream',
'ETag': 'd41d8cd98f00b204e9800998ecf8427e',
'Content-Length': '0'}
write_metadata(fp, md)
| swift-master | test/unit/cli/test_info.py |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import tempfile
import unittest
import six
from mock import mock
from swift.cli import ringcomposer
from test.unit import write_stub_builder
class TestCommands(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.composite_builder_file = os.path.join(self.tmpdir,
'composite.builder')
self.composite_ring_file = os.path.join(self.tmpdir,
'composite.ring')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def _run_composer(self, args):
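# Run the ringcomposer CLI with stdout/stderr captured; returns (exit_code, stdout, stderr).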
mock_stdout = six.StringIO()
mock_stderr = six.StringIO()
with mock.patch("sys.stdout", mock_stdout):
with mock.patch("sys.stderr", mock_stderr):
with self.assertRaises(SystemExit) as cm:
ringcomposer.main(args)
return (cm.exception.code,
mock_stdout.getvalue(),
mock_stderr.getvalue())
def test_unknown_command(self):
args = ('', self.composite_builder_file, 'unknown')
exit_code, stdout, stderr = self._run_composer(args)
self.assertEqual(2, exit_code)
self.assertIn('invalid choice', stderr)
args = ('', 'non-existent-file', 'unknown')
exit_code, stdout, stderr = self._run_composer(args)
self.assertEqual(2, exit_code)
self.assertIn('invalid choice', stderr)
def test_bad_composite_builder_file(self):
cmds = (('', self.composite_builder_file, 'show'),
('', self.composite_builder_file, 'compose',
'b1_file', 'b2_file', '--output', self.composite_ring_file))
for cmd in cmds:
try:
with open(self.composite_builder_file, 'wb') as fd:
fd.write(b'not json')
exit_code, stdout, stderr = self._run_composer(cmd)
self.assertEqual(2, exit_code)
self.assertIn('An error occurred while loading the composite '
'builder file', stderr)
self.assertIn(
'File does not contain valid composite ring data', stderr)
except AssertionError as err:
self.fail('Failed testing command %r due to: %s' % (cmd, err))
def test_compose(self):
b1, b1_file = write_stub_builder(self.tmpdir, 1)
b2, b2_file = write_stub_builder(self.tmpdir, 2)
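# write_stub_builder (from test.unit) writes a small ring builder file into tmpdir and returns (builder, path).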
args = ('', self.composite_builder_file, 'compose', b1_file, b2_file,
'--output', self.composite_ring_file)
exit_code, stdout, stderr = self._run_composer(args)
self.assertEqual(0, exit_code)
self.assertTrue(os.path.exists(self.composite_builder_file))
self.assertTrue(os.path.exists(self.composite_ring_file))
def test_compose_existing(self):
b1, b1_file = write_stub_builder(self.tmpdir, 1)
b2, b2_file = write_stub_builder(self.tmpdir, 2)
args = ('', self.composite_builder_file, 'compose', b1_file, b2_file,
'--output', self.composite_ring_file)
exit_code, stdout, stderr = self._run_composer(args)
self.assertEqual(0, exit_code)
os.unlink(self.composite_ring_file)
# no changes - expect failure
args = ('', self.composite_builder_file, 'compose',
'--output', self.composite_ring_file)
exit_code, stdout, stderr = self._run_composer(args)
self.assertEqual(2, exit_code)
self.assertFalse(os.path.exists(self.composite_ring_file))
# --force should force output
args = ('', self.composite_builder_file, 'compose',
'--output', self.composite_ring_file, '--force')
exit_code, stdout, stderr = self._run_composer(args)
self.assertEqual(0, exit_code)
self.assertTrue(os.path.exists(self.composite_ring_file))
def test_compose_insufficient_component_builder_files(self):
b1, b1_file = write_stub_builder(self.tmpdir, 1)
args = ('', self.composite_builder_file, 'compose', b1_file,
'--output', self.composite_ring_file)
exit_code, stdout, stderr = self._run_composer(args)
self.assertEqual(2, exit_code)
self.assertIn('An error occurred while composing the ring', stderr)
self.assertIn('Two or more component builders are required', stderr)
self.assertFalse(os.path.exists(self.composite_builder_file))
self.assertFalse(os.path.exists(self.composite_ring_file))
def test_compose_nonexistent_component_builder_file(self):
b1, b1_file = write_stub_builder(self.tmpdir, 1)
bad_file = os.path.join(self.tmpdir, 'non-existent-file')
args = ('', self.composite_builder_file, 'compose', b1_file, bad_file,
'--output', self.composite_ring_file)
exit_code, stdout, stderr = self._run_composer(args)
self.assertIn('An error occurred while composing the ring', stderr)
self.assertIn('Ring Builder file does not exist', stderr)
self.assertEqual(2, exit_code)
self.assertFalse(os.path.exists(self.composite_builder_file))
self.assertFalse(os.path.exists(self.composite_ring_file))
def test_compose_fails_to_write_composite_ring_file(self):
b1, b1_file = write_stub_builder(self.tmpdir, 1)
b2, b2_file = write_stub_builder(self.tmpdir, 2)
args = ('', self.composite_builder_file, 'compose', b1_file, b2_file,
'--output', self.composite_ring_file)
with mock.patch('swift.common.ring.RingData.save',
side_effect=IOError('io error')):
exit_code, stdout, stderr = self._run_composer(args)
self.assertEqual(2, exit_code)
self.assertIn(
'An error occurred while writing the composite ring file', stderr)
self.assertIn('io error', stderr)
self.assertFalse(os.path.exists(self.composite_builder_file))
self.assertFalse(os.path.exists(self.composite_ring_file))
def test_compose_fails_to_write_composite_builder_file(self):
b1, b1_file = write_stub_builder(self.tmpdir, 1)
b2, b2_file = write_stub_builder(self.tmpdir, 2)
args = ('', self.composite_builder_file, 'compose', b1_file, b2_file,
'--output', self.composite_ring_file)
func = 'swift.common.ring.composite_builder.CompositeRingBuilder.save'
with mock.patch(func, side_effect=IOError('io error')):
exit_code, stdout, stderr = self._run_composer(args)
self.assertEqual(2, exit_code)
self.assertIn(
'An error occurred while writing the composite builder file',
stderr)
self.assertIn('io error', stderr)
self.assertFalse(os.path.exists(self.composite_builder_file))
self.assertTrue(os.path.exists(self.composite_ring_file))
def test_show(self):
b1, b1_file = write_stub_builder(self.tmpdir, 1)
b2, b2_file = write_stub_builder(self.tmpdir, 2)
args = ('', self.composite_builder_file, 'compose', b1_file, b2_file,
'--output', self.composite_ring_file)
exit_code, stdout, stderr = self._run_composer(args)
self.assertEqual(0, exit_code)
args = ('', self.composite_builder_file, 'show')
exit_code, stdout, stderr = self._run_composer(args)
self.assertEqual(0, exit_code)
expected = {'component_builder_files': {b1.id: b1_file,
b2.id: b2_file},
'components': [
{'id': b1.id,
'replicas': b1.replicas,
                     # version counts the added devices (replicas) plus one rebalance
'version': b1.replicas + 1},
{'id': b2.id,
'replicas': b2.replicas,
                     # version counts the added devices (replicas) plus one rebalance
'version': b2.replicas + 1}],
'version': 1
}
self.assertEqual(expected, json.loads(stdout))
def test_show_nonexistent_composite_builder_file(self):
args = ('', 'non-existent-file', 'show')
exit_code, stdout, stderr = self._run_composer(args)
self.assertEqual(2, exit_code)
self.assertIn(
'An error occurred while loading the composite builder file',
stderr)
self.assertIn("No such file or directory: 'non-existent-file'", stderr)
| swift-master | test/unit/cli/test_ringcomposer.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for swift.container.backend """
import base64
import errno
import os
import inspect
import shutil
import unittest
from time import sleep, time
from uuid import uuid4
import random
from collections import defaultdict
from contextlib import contextmanager
import sqlite3
import string
import pickle
import json
import itertools
import six
from swift.common.exceptions import LockTimeout
from swift.container.backend import ContainerBroker, \
update_new_item_from_existing, UNSHARDED, SHARDING, SHARDED, \
COLLAPSED, SHARD_LISTING_STATES, SHARD_UPDATE_STATES, sift_shard_ranges
from swift.common.db import DatabaseAlreadyExists, GreenDBConnection, \
TombstoneReclaimer, GreenDBCursor
from swift.common.request_helpers import get_reserved_name
from swift.common.utils import Timestamp, encode_timestamps, hash_path, \
ShardRange, make_db_file_path, md5, ShardRangeList, Namespace
from swift.common.storage_policy import POLICIES
import mock
from test import annotate_failure
from test.debug_logger import debug_logger
from test.unit import (patch_policies, with_tempdir, make_timestamp_iter,
EMPTY_ETAG, mock_timestamp_now)
from test.unit.common import test_db
class TestContainerBroker(test_db.TestDbBase):
"""Tests for ContainerBroker"""
expected_db_tables = {'outgoing_sync', 'incoming_sync', 'object',
'sqlite_sequence', 'policy_stat',
'container_info', 'shard_range'}
server_type = 'container'
def setUp(self):
super(TestContainerBroker, self).setUp()
self.ts = make_timestamp_iter()
def _assert_shard_ranges(self, broker, expected, include_own=False):
actual = broker.get_shard_ranges(include_deleted=True,
include_own=include_own)
self.assertEqual([dict(sr) for sr in expected],
[dict(sr) for sr in actual])
def _delete_table(self, broker, table):
"""
        Delete the table ``table`` from the broker's database.
:param broker: an object instance of ContainerBroker.
:param table: the name of the table to delete.
"""
with broker.get() as conn:
try:
conn.execute("""
DROP TABLE %s
""" % table)
except sqlite3.OperationalError as err:
if ('no such table: %s' % table) in str(err):
return
else:
raise
def _add_shard_range_table(self, broker):
"""
Add the 'shard_range' table into the broker database.
:param broker: an object instance of ContainerBroker.
"""
with broker.get() as conn:
broker.create_shard_range_table(conn)
def test_creation(self):
# Test ContainerBroker.__init__
db_file = self.get_db_path()
broker = ContainerBroker(db_file, account='a', container='c')
self.assertEqual(broker._db_file, db_file)
broker.initialize(Timestamp('1').internal, 0)
with broker.get() as conn:
curs = conn.cursor()
curs.execute('SELECT 1')
self.assertEqual(curs.fetchall()[0][0], 1)
curs.execute("SELECT name FROM sqlite_master WHERE type='table';")
self.assertEqual(self.expected_db_tables,
{row[0] for row in curs.fetchall()})
# check the update trigger
broker.put_object('blah', Timestamp.now().internal, 0, 'text/plain',
'etag', 0, 0)
# commit pending file into db
broker._commit_puts()
with broker.get() as conn:
with self.assertRaises(sqlite3.DatabaseError) as cm:
conn.execute('UPDATE object SET name="blah";')
self.assertIn('UPDATE not allowed', str(cm.exception))
if 'shard_range' in self.expected_db_tables:
# check the update trigger
broker.merge_shard_ranges(broker.get_own_shard_range())
with broker.get() as conn:
with self.assertRaises(sqlite3.DatabaseError) as cm:
conn.execute('UPDATE shard_range SET name="blah";')
self.assertIn('UPDATE not allowed', str(cm.exception))
@patch_policies
def test_storage_policy_property(self):
for policy in POLICIES:
broker = ContainerBroker(self.get_db_path(), account='a',
container='policy_%s' % policy.name)
broker.initialize(next(self.ts).internal, policy.idx)
with broker.get() as conn:
try:
conn.execute('''SELECT storage_policy_index
FROM container_stat''')
except Exception:
is_migrated = False
else:
is_migrated = True
if not is_migrated:
# pre spi tests don't set policy on initialize
broker.set_storage_policy_index(policy.idx)
# clear cached state
if hasattr(broker, '_storage_policy_index'):
del broker._storage_policy_index
execute_queries = []
real_execute = GreenDBCursor.execute
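            # wrap GreenDBCursor.execute to record each non-PRAGMA SQL
            # statement, so we can assert how many queries each
            # storage_policy_index lookup issues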
def tracking_exec(*args):
if not args[1].startswith('PRAGMA '):
execute_queries.append(args[1])
return real_execute(*args)
with mock.patch.object(GreenDBCursor, 'execute', tracking_exec):
self.assertEqual(policy.idx, broker.storage_policy_index)
self.assertEqual(len(execute_queries), 1, execute_queries)
broker.enable_sharding(next(self.ts))
self.assertTrue(broker.set_sharding_state())
if not is_migrated:
# pre spi tests don't set policy when initializing the
# new broker, either
broker.set_storage_policy_index(policy.idx)
del execute_queries[:]
del broker._storage_policy_index
with mock.patch.object(GreenDBCursor, 'execute', tracking_exec):
self.assertEqual(policy.idx, broker.storage_policy_index)
self.assertEqual(len(execute_queries), 1, execute_queries)
self.assertTrue(broker.set_sharded_state())
del execute_queries[:]
del broker._storage_policy_index
with mock.patch.object(GreenDBCursor, 'execute', tracking_exec):
self.assertEqual(policy.idx, broker.storage_policy_index)
self.assertEqual(len(execute_queries), 1, execute_queries)
# make sure it's cached
with mock.patch.object(broker, 'get', side_effect=RuntimeError):
self.assertEqual(policy.idx, broker.storage_policy_index)
def test_exception(self):
# Test ContainerBroker throwing a conn away after
# unhandled exception
first_conn = None
broker = ContainerBroker(self.get_db_path(),
account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
with broker.get() as conn:
first_conn = conn
try:
with broker.get() as conn:
self.assertEqual(first_conn, conn)
raise Exception('OMG')
except Exception:
pass
self.assertTrue(broker.conn is None)
@with_tempdir
@mock.patch("swift.container.backend.ContainerBroker.get")
def test_is_old_enough_to_reclaim(self, tempdir, mocked_get):
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
def do_test(now, reclaim_age, put_ts, delete_ts, expected):
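            # stub broker.get() so the queried info row reports the given
            # put/delete timestamps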
mocked_get.return_value.\
__enter__.return_value.\
execute.return_value.\
fetchone.return_value = dict(delete_timestamp=delete_ts,
put_timestamp=put_ts)
self.assertEqual(expected,
broker.is_old_enough_to_reclaim(now, reclaim_age))
now_time = time()
tests = (
# (now, reclaim_age, put_ts, del_ts, expected),
(0, 0, 0, 0, False),
# Never deleted
(now_time, 100, now_time - 200, 0, False),
            # Deleted ts older than the put_ts
(now_time, 100, now_time - 150, now_time - 200, False),
# not reclaim_age yet
(now_time, 100, now_time - 150, now_time - 50, False),
            # deleted exactly at the reclaim_age boundary is not reclaimed
(now_time, 100, now_time - 150, now_time - 100, False),
# put_ts wins over del_ts
(now_time, 100, now_time - 150, now_time - 150, False),
# good case, reclaim > delete_ts > put_ts
(now_time, 100, now_time - 150, now_time - 125, True))
for test in tests:
do_test(*test)
@with_tempdir
def test_is_reclaimable(self, tempdir):
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
self.assertFalse(broker.is_reclaimable(float(next(self.ts)), 0))
broker.delete_db(next(self.ts).internal)
self.assertFalse(broker.is_reclaimable(float(next(self.ts)), 604800))
self.assertTrue(broker.is_reclaimable(float(next(self.ts)), 0))
# adding a shard range makes us unreclaimable
sr = ShardRange('.shards_a/shard_c', next(self.ts), object_count=0)
broker.merge_shard_ranges([sr])
self.assertFalse(broker.is_reclaimable(float(next(self.ts)), 0))
# ... but still "deleted"
self.assertTrue(broker.is_deleted())
# ... until the shard range is deleted
sr.set_deleted(next(self.ts))
broker.merge_shard_ranges([sr])
self.assertTrue(broker.is_reclaimable(float(next(self.ts)), 0))
# adding an object makes us unreclaimable
obj = {'name': 'o', 'created_at': next(self.ts).internal,
'size': 0, 'content_type': 'text/plain', 'etag': EMPTY_ETAG,
'deleted': 0}
broker.merge_items([dict(obj)])
self.assertFalse(broker.is_reclaimable(float(next(self.ts)), 0))
# ... and "not deleted"
self.assertFalse(broker.is_deleted())
@with_tempdir
def test_sharding_state_is_not_reclaimable(self, tempdir):
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
broker.enable_sharding(next(self.ts))
broker.set_sharding_state()
broker.delete_db(next(self.ts).internal)
self.assertTrue(broker.is_deleted())
# we won't reclaim in SHARDING state
self.assertEqual(SHARDING, broker.get_db_state())
self.assertFalse(broker.is_reclaimable(float(next(self.ts)), 0))
# ... but if we find one stuck like this it's easy enough to fix
broker.set_sharded_state()
self.assertTrue(broker.is_reclaimable(float(next(self.ts)), 0))
@with_tempdir
def test_is_deleted(self, tempdir):
# Test ContainerBroker.is_deleted() and get_info_is_deleted()
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
self.assertFalse(broker.is_deleted())
broker.delete_db(next(self.ts).internal)
self.assertTrue(broker.is_deleted())
def check_object_counted(broker_to_test, broker_with_object):
obj = {'name': 'o', 'created_at': next(self.ts).internal,
'size': 0, 'content_type': 'text/plain', 'etag': EMPTY_ETAG,
'deleted': 0}
broker_with_object.merge_items([dict(obj)])
self.assertFalse(broker_to_test.is_deleted())
info, deleted = broker_to_test.get_info_is_deleted()
self.assertFalse(deleted)
self.assertEqual(1, info['object_count'])
obj.update({'created_at': next(self.ts).internal, 'deleted': 1})
broker_with_object.merge_items([dict(obj)])
self.assertTrue(broker_to_test.is_deleted())
info, deleted = broker_to_test.get_info_is_deleted()
self.assertTrue(deleted)
self.assertEqual(0, info['object_count'])
def check_object_not_counted(broker):
obj = {'name': 'o', 'created_at': next(self.ts).internal,
'size': 0, 'content_type': 'text/plain', 'etag': EMPTY_ETAG,
'deleted': 0}
broker.merge_items([dict(obj)])
self.assertTrue(broker.is_deleted())
info, deleted = broker.get_info_is_deleted()
self.assertTrue(deleted)
self.assertEqual(0, info['object_count'])
obj.update({'created_at': next(self.ts).internal, 'deleted': 1})
broker.merge_items([dict(obj)])
self.assertTrue(broker.is_deleted())
info, deleted = broker.get_info_is_deleted()
self.assertTrue(deleted)
self.assertEqual(0, info['object_count'])
def check_shard_ranges_not_counted():
sr = ShardRange('.shards_a/shard_c', next(self.ts), object_count=0)
sr.update_meta(13, 99, meta_timestamp=next(self.ts))
for state in ShardRange.STATES:
sr.update_state(state, state_timestamp=next(self.ts))
broker.merge_shard_ranges([sr])
self.assertTrue(broker.is_deleted())
info, deleted = broker.get_info_is_deleted()
self.assertTrue(deleted)
self.assertEqual(0, info['object_count'])
def check_shard_ranges_counted():
sr = ShardRange('.shards_a/shard_c', next(self.ts), object_count=0)
sr.update_meta(13, 99, meta_timestamp=next(self.ts))
counted_states = (ShardRange.ACTIVE, ShardRange.SHARDING,
ShardRange.SHRINKING)
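            # shard ranges in these states contribute their object stats,
            # so the broker should not appear deleted while they are counted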
for state in ShardRange.STATES:
sr.update_state(state, state_timestamp=next(self.ts))
broker.merge_shard_ranges([sr])
expected = state not in counted_states
self.assertEqual(expected, broker.is_deleted())
info, deleted = broker.get_info_is_deleted()
self.assertEqual(expected, deleted)
self.assertEqual(0 if expected else 13, info['object_count'])
sr.update_meta(0, 0, meta_timestamp=next(self.ts))
for state in ShardRange.STATES:
sr.update_state(state, state_timestamp=next(self.ts))
broker.merge_shard_ranges([sr])
self.assertTrue(broker.is_deleted())
info, deleted = broker.get_info_is_deleted()
self.assertTrue(deleted)
self.assertEqual(0, info['object_count'])
# unsharded
check_object_counted(broker, broker)
check_shard_ranges_not_counted()
# move to sharding state
broker.enable_sharding(next(self.ts))
self.assertTrue(broker.set_sharding_state())
self.assertTrue(broker.is_deleted())
# check object in retiring db is considered
check_object_counted(broker, broker.get_brokers()[0])
self.assertTrue(broker.is_deleted())
check_shard_ranges_not_counted()
# misplaced object in fresh db is not considered
check_object_not_counted(broker)
# move to sharded state
self.assertTrue(broker.set_sharded_state())
check_object_not_counted(broker)
check_shard_ranges_counted()
# own shard range has no influence
own_sr = broker.get_own_shard_range()
own_sr.update_meta(3, 4, meta_timestamp=next(self.ts))
broker.merge_shard_ranges([own_sr])
self.assertTrue(broker.is_deleted())
@with_tempdir
def test_empty(self, tempdir):
# Test ContainerBroker.empty
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
self.assertTrue(broker.is_root_container())
def check_object_counted(broker_to_test, broker_with_object):
obj = {'name': 'o', 'created_at': next(self.ts).internal,
'size': 0, 'content_type': 'text/plain', 'etag': EMPTY_ETAG,
'deleted': 0}
broker_with_object.merge_items([dict(obj)])
self.assertFalse(broker_to_test.empty())
# and delete it
obj.update({'created_at': next(self.ts).internal, 'deleted': 1})
broker_with_object.merge_items([dict(obj)])
self.assertTrue(broker_to_test.empty())
def check_shard_ranges_not_counted():
sr = ShardRange('.shards_a/shard_c', next(self.ts), object_count=0)
sr.update_meta(13, 99, meta_timestamp=next(self.ts))
for state in ShardRange.STATES:
sr.update_state(state, state_timestamp=next(self.ts))
broker.merge_shard_ranges([sr])
self.assertTrue(broker.empty())
# empty other shard ranges do not influence result
sr.update_meta(0, 0, meta_timestamp=next(self.ts))
for state in ShardRange.STATES:
sr.update_state(state, state_timestamp=next(self.ts))
broker.merge_shard_ranges([sr])
self.assertTrue(broker.empty())
self.assertTrue(broker.empty())
check_object_counted(broker, broker)
check_shard_ranges_not_counted()
# own shard range is not considered for object count
own_sr = broker.get_own_shard_range()
self.assertEqual(0, own_sr.object_count)
broker.merge_shard_ranges([own_sr])
self.assertTrue(broker.empty())
broker.put_object('o', next(self.ts).internal, 0, 'text/plain',
EMPTY_ETAG)
own_sr = broker.get_own_shard_range()
self.assertEqual(0, own_sr.object_count)
broker.merge_shard_ranges([own_sr])
self.assertFalse(broker.empty())
broker.delete_object('o', next(self.ts).internal)
self.assertTrue(broker.empty())
# have own shard range but in state ACTIVE
self.assertEqual(ShardRange.ACTIVE, own_sr.state)
check_object_counted(broker, broker)
check_shard_ranges_not_counted()
def check_shard_ranges_counted():
# other shard range is considered
sr = ShardRange('.shards_a/shard_c', next(self.ts), object_count=0)
sr.update_meta(13, 99, meta_timestamp=next(self.ts))
counted_states = (ShardRange.ACTIVE, ShardRange.SHARDING,
ShardRange.SHRINKING)
for state in ShardRange.STATES:
sr.update_state(state, state_timestamp=next(self.ts))
broker.merge_shard_ranges([sr])
self.assertEqual(state not in counted_states, broker.empty())
# empty other shard ranges do not influence result
sr.update_meta(0, 0, meta_timestamp=next(self.ts))
for state in ShardRange.STATES:
sr.update_state(state, state_timestamp=next(self.ts))
broker.merge_shard_ranges([sr])
self.assertTrue(broker.empty())
# enable sharding
broker.enable_sharding(next(self.ts))
check_object_counted(broker, broker)
check_shard_ranges_counted()
# move to sharding state
self.assertTrue(broker.set_sharding_state())
# check object in retiring db is considered
check_object_counted(broker, broker.get_brokers()[0])
self.assertTrue(broker.empty())
# as well as misplaced objects in fresh db
check_object_counted(broker, broker)
check_shard_ranges_counted()
# move to sharded state
self.assertTrue(broker.set_sharded_state())
self.assertTrue(broker.empty())
check_object_counted(broker, broker)
check_shard_ranges_counted()
# own shard range still has no influence
own_sr = broker.get_own_shard_range()
own_sr.update_meta(3, 4, meta_timestamp=next(self.ts))
broker.merge_shard_ranges([own_sr])
self.assertTrue(broker.empty())
@with_tempdir
def test_empty_old_style_shard_container(self, tempdir):
# Test ContainerBroker.empty for a shard container where shard range
# usage should not be considered
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(db_path, account='.shards_a', container='cc')
broker.initialize(next(self.ts).internal, 0)
broker.set_sharding_sysmeta('Root', 'a/c')
self.assertFalse(broker.is_root_container())
def check_object_counted(broker_to_test, broker_with_object):
obj = {'name': 'o', 'created_at': next(self.ts).internal,
'size': 0, 'content_type': 'text/plain', 'etag': EMPTY_ETAG,
'deleted': 0}
broker_with_object.merge_items([dict(obj)])
self.assertFalse(broker_to_test.empty())
# and delete it
obj.update({'created_at': next(self.ts).internal, 'deleted': 1})
broker_with_object.merge_items([dict(obj)])
self.assertTrue(broker_to_test.empty())
self.assertTrue(broker.empty())
check_object_counted(broker, broker)
# own shard range is not considered for object count
own_sr = broker.get_own_shard_range()
self.assertEqual(0, own_sr.object_count)
broker.merge_shard_ranges([own_sr])
self.assertTrue(broker.empty())
broker.put_object('o', next(self.ts).internal, 0, 'text/plain',
EMPTY_ETAG)
own_sr = broker.get_own_shard_range()
self.assertEqual(0, own_sr.object_count)
broker.merge_shard_ranges([own_sr])
self.assertFalse(broker.empty())
broker.delete_object('o', next(self.ts).internal)
self.assertTrue(broker.empty())
def check_shard_ranges_not_counted():
sr = ShardRange('.shards_a/shard_c', next(self.ts), object_count=0)
sr.update_meta(13, 99, meta_timestamp=next(self.ts))
for state in ShardRange.STATES:
sr.update_state(state, state_timestamp=next(self.ts))
broker.merge_shard_ranges([sr])
self.assertTrue(broker.empty())
# empty other shard ranges do not influence result
sr.update_meta(0, 0, meta_timestamp=next(self.ts))
for state in ShardRange.STATES:
sr.update_state(state, state_timestamp=next(self.ts))
broker.merge_shard_ranges([sr])
self.assertTrue(broker.empty())
check_shard_ranges_not_counted()
# move to sharding state
broker.enable_sharding(next(self.ts))
self.assertTrue(broker.set_sharding_state())
# check object in retiring db is considered
check_object_counted(broker, broker.get_brokers()[0])
self.assertTrue(broker.empty())
# as well as misplaced objects in fresh db
check_object_counted(broker, broker)
check_shard_ranges_not_counted()
# move to sharded state
self.assertTrue(broker.set_sharded_state())
self.assertTrue(broker.empty())
check_object_counted(broker, broker)
check_shard_ranges_not_counted()
# own shard range still has no influence
own_sr = broker.get_own_shard_range()
own_sr.update_meta(3, 4, meta_timestamp=next(self.ts))
broker.merge_shard_ranges([own_sr])
self.assertTrue(broker.empty())
@with_tempdir
def test_empty_shard_container(self, tempdir):
# Test ContainerBroker.empty for a shard container where shard range
# usage should not be considered
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(db_path, account='.shards_a', container='cc')
broker.initialize(next(self.ts).internal, 0)
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
self.assertFalse(broker.is_root_container())
self.assertEqual('a/c', broker.root_path)
def check_object_counted(broker_to_test, broker_with_object):
obj = {'name': 'o', 'created_at': next(self.ts).internal,
'size': 0, 'content_type': 'text/plain', 'etag': EMPTY_ETAG,
'deleted': 0}
broker_with_object.merge_items([dict(obj)])
self.assertFalse(broker_to_test.empty())
# and delete it
obj.update({'created_at': next(self.ts).internal, 'deleted': 1})
broker_with_object.merge_items([dict(obj)])
self.assertTrue(broker_to_test.empty())
self.assertTrue(broker.empty())
self.assertFalse(broker.is_root_container())
check_object_counted(broker, broker)
# own shard range is not considered for object count
own_sr = broker.get_own_shard_range()
self.assertEqual(0, own_sr.object_count)
broker.merge_shard_ranges([own_sr])
self.assertTrue(broker.empty())
broker.put_object('o', next(self.ts).internal, 0, 'text/plain',
EMPTY_ETAG)
own_sr = broker.get_own_shard_range()
self.assertEqual(0, own_sr.object_count)
broker.merge_shard_ranges([own_sr])
self.assertFalse(broker.empty())
broker.delete_object('o', next(self.ts).internal)
self.assertTrue(broker.empty())
def check_shard_ranges_not_counted():
sr = ShardRange('.shards_a/shard_c', next(self.ts), object_count=0)
sr.update_meta(13, 99, meta_timestamp=next(self.ts))
for state in ShardRange.STATES:
sr.update_state(state, state_timestamp=next(self.ts))
broker.merge_shard_ranges([sr])
self.assertTrue(broker.empty())
# empty other shard ranges do not influence result
sr.update_meta(0, 0, meta_timestamp=next(self.ts))
for state in ShardRange.STATES:
sr.update_state(state, state_timestamp=next(self.ts))
broker.merge_shard_ranges([sr])
self.assertTrue(broker.empty())
check_shard_ranges_not_counted()
# move to sharding state
broker.enable_sharding(next(self.ts))
self.assertTrue(broker.set_sharding_state())
# check object in retiring db is considered
check_object_counted(broker, broker.get_brokers()[0])
self.assertTrue(broker.empty())
# as well as misplaced objects in fresh db
check_object_counted(broker, broker)
check_shard_ranges_not_counted()
# move to sharded state
self.assertTrue(broker.set_sharded_state())
self.assertTrue(broker.empty())
check_object_counted(broker, broker)
check_shard_ranges_not_counted()
# own shard range still has no influence
own_sr = broker.get_own_shard_range()
own_sr.update_meta(3, 4, meta_timestamp=next(self.ts))
broker.merge_shard_ranges([own_sr])
self.assertTrue(broker.empty())
self.assertFalse(broker.is_deleted())
self.assertFalse(broker.is_root_container())
# sharder won't call delete_db() unless own_shard_range is deleted
own_sr.deleted = True
own_sr.timestamp = next(self.ts)
broker.merge_shard_ranges([own_sr])
broker.delete_db(next(self.ts).internal)
self.assertFalse(broker.is_root_container())
self.assertEqual('a/c', broker.root_path)
# Get a fresh broker, with instance cache unset
broker = ContainerBroker(db_path, account='.shards_a', container='cc')
self.assertTrue(broker.empty())
self.assertTrue(broker.is_deleted())
self.assertFalse(broker.is_root_container())
self.assertEqual('a/c', broker.root_path)
# older versions *did* delete sharding sysmeta when db was deleted...
# but still know they are not root containers
broker.set_sharding_sysmeta('Quoted-Root', '')
self.assertFalse(broker.is_root_container())
self.assertEqual('a/c', broker.root_path)
# however, they have bogus root path once instance cache is cleared...
broker = ContainerBroker(db_path, account='.shards_a', container='cc')
self.assertFalse(broker.is_root_container())
self.assertEqual('.shards_a/cc', broker.root_path)
def test_reclaim(self):
broker = ContainerBroker(self.get_db_path(),
account='test_account',
container='test_container')
broker.initialize(Timestamp('1').internal, 0)
broker.put_object('o', Timestamp.now().internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
# commit pending file into db
broker._commit_puts()
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
broker.reclaim(Timestamp(time() - 999).internal, time())
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
sleep(.00001)
broker.delete_object('o', Timestamp.now().internal)
broker._commit_puts()
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 1)
broker.reclaim(Timestamp(time() - 999).internal, time())
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 1)
sleep(.00001)
broker.reclaim(Timestamp.now().internal, time())
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
# Test the return values of reclaim()
broker.put_object('w', Timestamp.now().internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('x', Timestamp.now().internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('y', Timestamp.now().internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('z', Timestamp.now().internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
broker._commit_puts()
# Test before deletion
broker.reclaim(Timestamp.now().internal, time())
broker.delete_db(Timestamp.now().internal)
def test_batch_reclaim(self):
num_of_objects = 60
obj_specs = []
now = time()
top_of_the_minute = now - (now % 60)
c = itertools.cycle([True, False])
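        # alternate deleted/live objects, each timestamped one minute
        # earlier than the last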
for m, is_deleted in six.moves.zip(range(num_of_objects), c):
offset = top_of_the_minute - (m * 60)
obj_specs.append((Timestamp(offset), is_deleted))
random.seed(now)
random.shuffle(obj_specs)
policy_indexes = list(p.idx for p in POLICIES)
broker = ContainerBroker(self.get_db_path(),
account='test_account',
container='test_container')
broker.initialize(Timestamp('1').internal, 0)
for i, obj_spec in enumerate(obj_specs):
# with object12 before object2 and shuffled ts.internal we
            # shouldn't be able to accidentally rely on any implicit ordering
obj_name = 'object%s' % i
pidx = random.choice(policy_indexes)
ts, is_deleted = obj_spec
if is_deleted:
broker.delete_object(obj_name, ts.internal, pidx)
else:
broker.put_object(obj_name, ts.internal, 0, 'text/plain',
'etag', storage_policy_index=pidx)
# commit pending file into db
broker._commit_puts()
def count_reclaimable(conn, reclaim_age):
return conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1 AND created_at < ?", (reclaim_age,)
).fetchone()[0]
# This is intended to divide the set of timestamps exactly in half
# regardless of the value of now
reclaim_age = top_of_the_minute + 1 - (num_of_objects / 2 * 60)
with broker.get() as conn:
self.assertEqual(count_reclaimable(conn, reclaim_age),
num_of_objects / 4)
trace = []
class TracingReclaimer(TombstoneReclaimer):
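            # record (age_timestamp, marker, remaining reclaimable count)
            # before each batch is reclaimed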
def _reclaim(self, conn):
trace.append(
(self.age_timestamp, self.marker,
count_reclaimable(conn, self.age_timestamp)))
return super(TracingReclaimer, self)._reclaim(conn)
with mock.patch(
'swift.common.db.TombstoneReclaimer', TracingReclaimer), \
mock.patch('swift.common.db.RECLAIM_PAGE_SIZE', 10):
broker.reclaim(reclaim_age, reclaim_age)
with broker.get() as conn:
self.assertEqual(count_reclaimable(conn, reclaim_age), 0)
self.assertEqual(3, len(trace), trace)
self.assertEqual([age for age, marker, reclaimable in trace],
[reclaim_age] * 3)
# markers are in-order
self.assertLess(trace[0][1], trace[1][1])
self.assertLess(trace[1][1], trace[2][1])
# reclaimable count gradually decreases
# generally, count1 > count2 > count3, but because of the randomness
        # we may occasionally have count1 == count2 or count2 == count3
self.assertGreaterEqual(trace[0][2], trace[1][2])
self.assertGreaterEqual(trace[1][2], trace[2][2])
# technically, this might happen occasionally, but *really* rarely
self.assertTrue(trace[0][2] > trace[1][2] or
trace[1][2] > trace[2][2])
def test_reclaim_with_duplicate_names(self):
broker = ContainerBroker(self.get_db_path(),
account='test_account',
container='test_container')
broker.initialize(Timestamp('1').internal, 0)
now = time()
ages_ago = Timestamp(now - (3 * 7 * 24 * 60 * 60))
for i in range(10):
for spidx in range(10):
obj_name = 'object%s' % i
broker.delete_object(obj_name, ages_ago.internal, spidx)
# commit pending file into db
broker._commit_puts()
reclaim_age = now - (2 * 7 * 24 * 60 * 60)
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE created_at < ?", (reclaim_age,)
).fetchone()[0], 100)
with mock.patch('swift.common.db.RECLAIM_PAGE_SIZE', 10):
broker.reclaim(reclaim_age, reclaim_age)
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
).fetchone()[0], 0)
@with_tempdir
def test_reclaim_deadlock(self, tempdir):
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', '%s.db' % uuid4())
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(Timestamp(100).internal, 0)
# there's some magic count here that causes the failure, something
# about the size of object records and sqlite page size maybe?
count = 23000
for i in range(count):
obj_name = 'o%d' % i
ts = Timestamp(200).internal
broker.delete_object(obj_name, ts)
broker._commit_puts()
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object").fetchone()[0], count)
# make a broker whose container attribute is not yet set so that
# reclaim will need to query info to set it
broker = ContainerBroker(db_path, timeout=1)
# verify that reclaim doesn't get deadlocked and timeout
broker.reclaim(300, 300)
# check all objects were reclaimed
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object"
).fetchone()[0], 0)
@with_tempdir
def test_reclaim_shard_ranges(self, tempdir):
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', '%s.db' % uuid4())
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
older = next(self.ts)
same = next(self.ts)
newer = next(self.ts)
shard_ranges = [
ShardRange('.shards_a/older_deleted', older.internal, '', 'a',
deleted=True),
ShardRange('.shards_a/same_deleted', same.internal, 'a', 'b',
deleted=True),
ShardRange('.shards_a/newer_deleted', newer.internal, 'b', 'c',
deleted=True),
ShardRange('.shards_a/older', older.internal, 'c', 'd'),
ShardRange('.shards_a/same', same.internal, 'd', 'e'),
ShardRange('.shards_a/newer', newer.internal, 'e', 'f'),
# own shard range is never reclaimed, even if deleted
ShardRange('a/c', older.internal, '', '', deleted=True)]
broker.merge_shard_ranges(
random.sample(shard_ranges, len(shard_ranges)))
def assert_row_count(expected):
with broker.get() as conn:
res = conn.execute("SELECT count(*) FROM shard_range")
self.assertEqual(expected, res.fetchone()[0])
broker.reclaim(older.internal, older.internal)
assert_row_count(7)
self._assert_shard_ranges(broker, shard_ranges, include_own=True)
broker.reclaim(older.internal, same.internal)
assert_row_count(6)
self._assert_shard_ranges(broker, shard_ranges[1:], include_own=True)
broker.reclaim(older.internal, newer.internal)
assert_row_count(5)
self._assert_shard_ranges(broker, shard_ranges[2:], include_own=True)
broker.reclaim(older.internal, next(self.ts).internal)
assert_row_count(4)
self._assert_shard_ranges(broker, shard_ranges[3:], include_own=True)
def test_get_info_is_deleted(self):
ts = make_timestamp_iter()
start = next(ts)
broker = ContainerBroker(self.get_db_path(),
account='test_account',
container='test_container')
# create it
broker.initialize(start.internal, POLICIES.default.idx)
info, is_deleted = broker.get_info_is_deleted()
self.assertEqual(is_deleted, broker.is_deleted())
self.assertEqual(is_deleted, False) # sanity
self.assertEqual(info, broker.get_info())
self.assertEqual(info['put_timestamp'], start.internal)
self.assertTrue(Timestamp(info['created_at']) >= start)
self.assertEqual(info['delete_timestamp'], '0')
if self.__class__ in (
TestContainerBrokerBeforeMetadata,
TestContainerBrokerBeforeXSync,
TestContainerBrokerBeforeSPI,
TestContainerBrokerBeforeShardRanges,
TestContainerBrokerBeforeShardRangeReportedColumn,
TestContainerBrokerBeforeShardRangeTombstonesColumn):
self.assertEqual(info['status_changed_at'], '0')
else:
self.assertEqual(info['status_changed_at'],
start.internal)
# delete it
delete_timestamp = next(ts)
broker.delete_db(delete_timestamp.internal)
info, is_deleted = broker.get_info_is_deleted()
self.assertEqual(is_deleted, True) # sanity
self.assertEqual(is_deleted, broker.is_deleted())
self.assertEqual(info, broker.get_info())
self.assertEqual(info['put_timestamp'], start.internal)
self.assertTrue(Timestamp(info['created_at']) >= start)
self.assertEqual(info['delete_timestamp'], delete_timestamp)
self.assertEqual(info['status_changed_at'], delete_timestamp)
# bring back to life
broker.put_object('obj', next(ts).internal, 0, 'text/plain', 'etag',
storage_policy_index=broker.storage_policy_index)
info, is_deleted = broker.get_info_is_deleted()
self.assertEqual(is_deleted, False) # sanity
self.assertEqual(is_deleted, broker.is_deleted())
self.assertEqual(info, broker.get_info())
self.assertEqual(info['put_timestamp'], start.internal)
self.assertTrue(Timestamp(info['created_at']) >= start)
self.assertEqual(info['delete_timestamp'], delete_timestamp)
self.assertEqual(info['status_changed_at'], delete_timestamp)
def test_delete_object(self):
# Test ContainerBroker.delete_object
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
broker.put_object('o', Timestamp.now().internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
# commit pending file into db
broker._commit_puts()
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
sleep(.00001)
broker.delete_object('o', Timestamp.now().internal)
broker._commit_puts()
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 1)
def test_put_object(self):
# Test ContainerBroker.put_object
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
# Create initial object
timestamp = Timestamp.now().internal
broker.put_object('"{<object \'&\' name>}"', timestamp, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
# commit pending file into db
broker._commit_puts()
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Reput same event
broker.put_object('"{<object \'&\' name>}"', timestamp, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
broker._commit_puts()
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put new event
sleep(.00001)
timestamp = Timestamp.now().internal
broker.put_object('"{<object \'&\' name>}"', timestamp, 124,
'application/x-test',
'aa0749bacbc79ec65fe206943d8fe449')
broker._commit_puts()
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 124)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'aa0749bacbc79ec65fe206943d8fe449')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put old event
otimestamp = Timestamp(float(Timestamp(timestamp)) - 1).internal
broker.put_object('"{<object \'&\' name>}"', otimestamp, 124,
'application/x-test',
'aa0749bacbc79ec65fe206943d8fe449')
broker._commit_puts()
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 124)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'aa0749bacbc79ec65fe206943d8fe449')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put old delete event
dtimestamp = Timestamp(float(Timestamp(timestamp)) - 1).internal
broker.put_object('"{<object \'&\' name>}"', dtimestamp, 0, '', '',
deleted=1)
broker._commit_puts()
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 124)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'aa0749bacbc79ec65fe206943d8fe449')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put new delete event
sleep(.00001)
timestamp = Timestamp.now().internal
broker.put_object('"{<object \'&\' name>}"', timestamp, 0, '', '',
deleted=1)
broker._commit_puts()
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 1)
# Put new event
sleep(.00001)
timestamp = Timestamp.now().internal
broker.put_object('"{<object \'&\' name>}"', timestamp, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
broker._commit_puts()
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# We'll use this later
sleep(.0001)
in_between_timestamp = Timestamp.now().internal
# New post event
sleep(.0001)
previous_timestamp = timestamp
timestamp = Timestamp.now().internal
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0],
previous_timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put event from after last put but before last post
timestamp = in_between_timestamp
broker.put_object('"{<object \'&\' name>}"', timestamp, 456,
'application/x-test3',
'6af83e3196bf99f440f31f2e1a6c9afe')
broker._commit_puts()
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], 456)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test3')
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'6af83e3196bf99f440f31f2e1a6c9afe')
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
def test_merge_shard_range_single_record(self):
# Test ContainerBroker.merge_shard_range
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
# Stash these for later
old_put_timestamp = next(self.ts).internal
old_delete_timestamp = next(self.ts).internal
# Create initial object
timestamp = next(self.ts).internal
meta_timestamp = next(self.ts).internal
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
'low', 'up', meta_timestamp=meta_timestamp))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp)
self.assertEqual(conn.execute(
"SELECT meta_timestamp FROM shard_range").fetchone()[0],
meta_timestamp)
self.assertEqual(conn.execute(
"SELECT lower FROM shard_range").fetchone()[0], 'low')
self.assertEqual(conn.execute(
"SELECT upper FROM shard_range").fetchone()[0], 'up')
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT object_count FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT reported FROM shard_range").fetchone()[0], 0)
# Reput same event
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
'low', 'up', meta_timestamp=meta_timestamp))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp)
self.assertEqual(conn.execute(
"SELECT meta_timestamp FROM shard_range").fetchone()[0],
meta_timestamp)
self.assertEqual(conn.execute(
"SELECT lower FROM shard_range").fetchone()[0], 'low')
self.assertEqual(conn.execute(
"SELECT upper FROM shard_range").fetchone()[0], 'up')
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT object_count FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT reported FROM shard_range").fetchone()[0], 0)
# Mark it as reported
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
'low', 'up', meta_timestamp=meta_timestamp,
reported=True))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp)
self.assertEqual(conn.execute(
"SELECT meta_timestamp FROM shard_range").fetchone()[0],
meta_timestamp)
self.assertEqual(conn.execute(
"SELECT lower FROM shard_range").fetchone()[0], 'low')
self.assertEqual(conn.execute(
"SELECT upper FROM shard_range").fetchone()[0], 'up')
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT object_count FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT reported FROM shard_range").fetchone()[0], 1)
# Reporting latches it
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
'low', 'up', meta_timestamp=meta_timestamp,
reported=False))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp)
self.assertEqual(conn.execute(
"SELECT meta_timestamp FROM shard_range").fetchone()[0],
meta_timestamp)
self.assertEqual(conn.execute(
"SELECT lower FROM shard_range").fetchone()[0], 'low')
self.assertEqual(conn.execute(
"SELECT upper FROM shard_range").fetchone()[0], 'up')
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT object_count FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT reported FROM shard_range").fetchone()[0], 1)
# Put new event
timestamp = next(self.ts).internal
meta_timestamp = next(self.ts).internal
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
'lower', 'upper', 1, 2, meta_timestamp=meta_timestamp))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp)
self.assertEqual(conn.execute(
"SELECT meta_timestamp FROM shard_range").fetchone()[0],
meta_timestamp)
self.assertEqual(conn.execute(
"SELECT lower FROM shard_range").fetchone()[0], 'lower')
self.assertEqual(conn.execute(
"SELECT upper FROM shard_range").fetchone()[0], 'upper')
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT object_count FROM shard_range").fetchone()[0], 1)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 2)
self.assertEqual(conn.execute(
"SELECT reported FROM shard_range").fetchone()[0], 0)
# Put old event
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', old_put_timestamp,
'lower', 'upper', 1, 2, meta_timestamp=meta_timestamp,
reported=True))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp) # Not old_put_timestamp!
self.assertEqual(conn.execute(
"SELECT meta_timestamp FROM shard_range").fetchone()[0],
meta_timestamp)
self.assertEqual(conn.execute(
"SELECT lower FROM shard_range").fetchone()[0], 'lower')
self.assertEqual(conn.execute(
"SELECT upper FROM shard_range").fetchone()[0], 'upper')
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT object_count FROM shard_range").fetchone()[0], 1)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 2)
self.assertEqual(conn.execute(
"SELECT reported FROM shard_range").fetchone()[0], 0)
# Put old delete event
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', old_delete_timestamp,
'lower', 'upper', meta_timestamp=meta_timestamp,
deleted=1))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp) # Not old_delete_timestamp!
self.assertEqual(conn.execute(
"SELECT meta_timestamp FROM shard_range").fetchone()[0],
meta_timestamp)
self.assertEqual(conn.execute(
"SELECT lower FROM shard_range").fetchone()[0], 'lower')
self.assertEqual(conn.execute(
"SELECT upper FROM shard_range").fetchone()[0], 'upper')
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT object_count FROM shard_range").fetchone()[0], 1)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 2)
# Put new delete event
timestamp = next(self.ts).internal
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
'lower', 'upper', meta_timestamp=meta_timestamp,
deleted=1))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp)
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 1)
# Put new event
timestamp = next(self.ts).internal
meta_timestamp = next(self.ts).internal
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
'lowerer', 'upperer', 3, 4,
meta_timestamp=meta_timestamp))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp)
self.assertEqual(conn.execute(
"SELECT meta_timestamp FROM shard_range").fetchone()[0],
meta_timestamp)
self.assertEqual(conn.execute(
"SELECT lower FROM shard_range").fetchone()[0], 'lowerer')
self.assertEqual(conn.execute(
"SELECT upper FROM shard_range").fetchone()[0], 'upperer')
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT object_count FROM shard_range").fetchone()[0], 3)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 4)
# We'll use this later
in_between_timestamp = next(self.ts).internal
# New update event, meta_timestamp increases
meta_timestamp = next(self.ts).internal
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
'lowerer', 'upperer', 3, 4,
meta_timestamp=meta_timestamp))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp)
self.assertEqual(conn.execute(
"SELECT meta_timestamp FROM shard_range").fetchone()[0],
meta_timestamp)
self.assertEqual(conn.execute(
"SELECT lower FROM shard_range").fetchone()[0], 'lowerer')
self.assertEqual(conn.execute(
"SELECT upper FROM shard_range").fetchone()[0], 'upperer')
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT object_count FROM shard_range").fetchone()[0], 3)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 4)
# Put event from after last put but before last post
timestamp = in_between_timestamp
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
'lowererer', 'uppererer', 5, 6,
meta_timestamp=meta_timestamp))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
'"a/{<shardrange \'&\' name>}"')
self.assertEqual(conn.execute(
"SELECT timestamp FROM shard_range").fetchone()[0],
timestamp)
self.assertEqual(conn.execute(
"SELECT meta_timestamp FROM shard_range").fetchone()[0],
meta_timestamp)
self.assertEqual(conn.execute(
"SELECT lower FROM shard_range").fetchone()[0], 'lowererer')
self.assertEqual(conn.execute(
"SELECT upper FROM shard_range").fetchone()[0], 'uppererer')
self.assertEqual(conn.execute(
"SELECT deleted FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT object_count FROM shard_range").fetchone()[0], 5)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 6)
def test_merge_shard_ranges_deleted(self):
# Test ContainerBroker.merge_shard_ranges sets deleted attribute
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
# put shard range
broker.merge_shard_ranges(ShardRange('a/o', next(self.ts).internal))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM shard_range "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEqual(conn.execute(
"SELECT count(*) FROM shard_range "
"WHERE deleted = 1").fetchone()[0], 0)
# delete shard range
broker.merge_shard_ranges(ShardRange('a/o', next(self.ts).internal,
deleted=1))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT count(*) FROM shard_range "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT count(*) FROM shard_range "
"WHERE deleted = 1").fetchone()[0], 1)
def test_make_tuple_for_pickle(self):
record = {'name': 'obj',
'created_at': '1234567890.12345',
'size': 42,
'content_type': 'text/plain',
'etag': 'hash_test',
'deleted': '1',
'storage_policy_index': '2',
'ctype_timestamp': None,
'meta_timestamp': None}
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
expect = ('obj', '1234567890.12345', 42, 'text/plain', 'hash_test',
'1', '2', None, None)
result = broker.make_tuple_for_pickle(record)
self.assertEqual(expect, result)
record['ctype_timestamp'] = '2233445566.00000'
expect = ('obj', '1234567890.12345', 42, 'text/plain', 'hash_test',
'1', '2', '2233445566.00000', None)
result = broker.make_tuple_for_pickle(record)
self.assertEqual(expect, result)
record['meta_timestamp'] = '5566778899.00000'
expect = ('obj', '1234567890.12345', 42, 'text/plain', 'hash_test',
'1', '2', '2233445566.00000', '5566778899.00000')
result = broker.make_tuple_for_pickle(record)
self.assertEqual(expect, result)
@with_tempdir
def test_load_old_record_from_pending_file(self, tempdir):
# Test reading old update record from pending file
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(time(), 0)
record = {'name': 'obj',
'created_at': '1234567890.12345',
'size': 42,
'content_type': 'text/plain',
'etag': 'hash_test',
'deleted': '1',
'storage_policy_index': '2',
'ctype_timestamp': None,
'meta_timestamp': None}
# sanity check
self.assertFalse(os.path.isfile(broker.pending_file))
# simulate existing pending items written with old code,
# i.e. without content_type and meta timestamps
def old_make_tuple_for_pickle(_, record):
return (record['name'], record['created_at'], record['size'],
record['content_type'], record['etag'], record['deleted'],
record['storage_policy_index'])
_new = 'swift.container.backend.ContainerBroker.make_tuple_for_pickle'
with mock.patch(_new, old_make_tuple_for_pickle):
broker.put_record(dict(record))
self.assertTrue(os.path.getsize(broker.pending_file) > 0)
read_items = []
def mock_merge_items(_, item_list, *args):
# capture the items read from the pending file
read_items.extend(item_list)
with mock.patch('swift.container.backend.ContainerBroker.merge_items',
mock_merge_items):
broker._commit_puts()
self.assertEqual(1, len(read_items))
self.assertEqual(record, read_items[0])
self.assertTrue(os.path.getsize(broker.pending_file) == 0)
@with_tempdir
def test_save_and_load_record_from_pending_file(self, tempdir):
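        # Verify that a record with explicit content-type and metadata
        # timestamps round-trips through the pending file and _commit_puts().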
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(time(), 0)
record = {'name': 'obj',
'created_at': '1234567890.12345',
'size': 42,
'content_type': 'text/plain',
'etag': 'hash_test',
'deleted': '1',
'storage_policy_index': '2',
'ctype_timestamp': '1234567890.44444',
'meta_timestamp': '1234567890.99999'}
# sanity check
self.assertFalse(os.path.isfile(broker.pending_file))
broker.put_record(dict(record))
self.assertTrue(os.path.getsize(broker.pending_file) > 0)
read_items = []
def mock_merge_items(_, item_list, *args):
# capture the items read from the pending file
read_items.extend(item_list)
with mock.patch('swift.container.backend.ContainerBroker.merge_items',
mock_merge_items):
broker._commit_puts()
self.assertEqual(1, len(read_items))
self.assertEqual(record, read_items[0])
self.assertTrue(os.path.getsize(broker.pending_file) == 0)
def _assert_db_row(self, broker, name, timestamp, size, content_type, hash,
deleted=0):
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM object").fetchone()[0], name)
self.assertEqual(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEqual(conn.execute(
"SELECT size FROM object").fetchone()[0], size)
self.assertEqual(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
content_type)
self.assertEqual(conn.execute(
"SELECT etag FROM object").fetchone()[0], hash)
self.assertEqual(conn.execute(
"SELECT deleted FROM object").fetchone()[0], deleted)
def _test_put_object_multiple_encoded_timestamps(self, broker):
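        # Shared helper: exercise put_object() with pre-encoded
        # data/content-type/metadata timestamps; used by both the in-memory
        # and file-backed db tests below.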
ts = make_timestamp_iter()
broker.initialize(next(ts).internal, 0)
t = [next(ts) for _ in range(9)]
# Create initial object
broker.put_object('obj_name', t[0].internal, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t[0].internal, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
# hash and size change with same data timestamp are ignored
t_encoded = encode_timestamps(t[0], t[1], t[1])
broker.put_object('obj_name', t_encoded, 456,
'application/x-test-2',
'1234567890abcdeffedcba0987654321')
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t_encoded, 123,
'application/x-test-2',
'5af83e3196bf99f440f31f2e1a6c9afe')
# content-type change with same timestamp is ignored
t_encoded = encode_timestamps(t[0], t[1], t[2])
broker.put_object('obj_name', t_encoded, 456,
'application/x-test-3',
'1234567890abcdeffedcba0987654321')
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t_encoded, 123,
'application/x-test-2',
'5af83e3196bf99f440f31f2e1a6c9afe')
# update with differing newer timestamps
t_encoded = encode_timestamps(t[4], t[6], t[8])
broker.put_object('obj_name', t_encoded, 789,
'application/x-test-3',
'abcdef1234567890abcdef1234567890')
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t_encoded, 789,
'application/x-test-3',
'abcdef1234567890abcdef1234567890')
# update with differing older timestamps should be ignored
t_encoded_older = encode_timestamps(t[3], t[5], t[7])
self.assertEqual(1, len(broker.get_items_since(0, 100)))
broker.put_object('obj_name', t_encoded_older, 9999,
'application/x-test-ignored',
'ignored_hash')
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t_encoded, 789,
'application/x-test-3',
'abcdef1234567890abcdef1234567890')
def test_put_object_multiple_encoded_timestamps_using_memory(self):
# Test ContainerBroker.put_object with differing data, content-type
# and metadata timestamps
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
self._test_put_object_multiple_encoded_timestamps(broker)
@with_tempdir
def test_has_other_shard_ranges(self, tempdir):
acct = 'account'
cont = 'container'
hsh = hash_path(acct, cont)
epoch = Timestamp.now()
db_file = "%s_%s.db" % (hsh, epoch.normal)
db_path = os.path.join(tempdir, db_file)
ts = Timestamp.now()
broker = ContainerBroker(db_path, account=acct,
container=cont, force_db_file=True)
# Create the test container database and all the tables.
broker.initialize(ts.internal, 0)
# Test the case which the 'shard_range' table doesn't exist yet.
self._delete_table(broker, 'shard_range')
self.assertFalse(broker.has_other_shard_ranges())
# Add the 'shard_range' table back to the database, but it doesn't
# have any shard range row in it yet.
self._add_shard_range_table(broker)
shard_ranges = broker.get_shard_ranges(
include_deleted=True, states=None, include_own=True)
self.assertEqual(shard_ranges, [])
self.assertFalse(broker.has_other_shard_ranges())
# Insert its 'own_shard_range' into this test database.
own_shard_range = broker.get_own_shard_range()
own_shard_range.update_state(ShardRange.SHARDING)
own_shard_range.epoch = epoch
broker.merge_shard_ranges([own_shard_range])
self.assertTrue(broker.get_shard_ranges(include_own=True))
self.assertFalse(broker.has_other_shard_ranges())
# Insert a child shard range into this test database.
first_child_sr = ShardRange(
'.shards_%s/%s_1' % (acct, cont), Timestamp.now())
broker.merge_shard_ranges([first_child_sr])
self.assertTrue(broker.has_other_shard_ranges())
# Mark the first child shard range as deleted.
first_child_sr.deleted = 1
first_child_sr.timestamp = Timestamp.now()
broker.merge_shard_ranges([first_child_sr])
self.assertFalse(broker.has_other_shard_ranges())
# Insert second child shard range into this test database.
second_child_sr = ShardRange(
'.shards_%s/%s_2' % (acct, cont), Timestamp.now())
broker.merge_shard_ranges([second_child_sr])
self.assertTrue(broker.has_other_shard_ranges())
# Mark the 'own_shard_range' as deleted.
own_shard_range.deleted = 1
own_shard_range.timestamp = Timestamp.now()
broker.merge_shard_ranges([own_shard_range])
self.assertTrue(broker.has_other_shard_ranges())
@with_tempdir
def test_get_db_state(self, tempdir):
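        # Walk the db through its sharding lifecycle and check get_db_state()
        # reports not_found, unsharded, sharding, sharded, collapsed and then
        # unsharded again once a new epoch is set.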
acct = 'account'
cont = 'container'
hsh = hash_path(acct, cont)
db_file = "%s.db" % hsh
epoch = Timestamp.now()
fresh_db_file = "%s_%s.db" % (hsh, epoch.normal)
db_path = os.path.join(tempdir, db_file)
fresh_db_path = os.path.join(tempdir, fresh_db_file)
ts = Timestamp.now()
# First test NOTFOUND state
broker = ContainerBroker(db_path, account=acct, container=cont)
self.assertEqual(broker.get_db_state(), 'not_found')
# Test UNSHARDED state, that is when db_file exists and fresh_db_file
# doesn't
broker.initialize(ts.internal, 0)
self.assertEqual(broker.get_db_state(), 'unsharded')
# Test the SHARDING state, this is the period when both the db_file and
# the fresh_db_file exist
fresh_broker = ContainerBroker(fresh_db_path, account=acct,
container=cont, force_db_file=True)
fresh_broker.initialize(ts.internal, 0)
own_shard_range = fresh_broker.get_own_shard_range()
own_shard_range.update_state(ShardRange.SHARDING)
own_shard_range.epoch = epoch
shard_range = ShardRange(
'.shards_%s/%s' % (acct, cont), Timestamp.now())
fresh_broker.merge_shard_ranges([own_shard_range, shard_range])
self.assertEqual(fresh_broker.get_db_state(), 'sharding')
# old broker will also change state if we reload its db files
broker.reload_db_files()
self.assertEqual(broker.get_db_state(), 'sharding')
# Test the SHARDED state, this is when only fresh_db_file exists.
os.unlink(db_path)
fresh_broker.reload_db_files()
self.assertEqual(fresh_broker.get_db_state(), 'sharded')
        # Test the COLLAPSED state, this is when only fresh_db_file exists
        # and all its other shard ranges have been deleted.
shard_range.deleted = 1
shard_range.timestamp = Timestamp.now()
fresh_broker.merge_shard_ranges([shard_range])
self.assertEqual(fresh_broker.get_db_state(), 'collapsed')
# back to UNSHARDED if the desired epoch changes
own_shard_range.update_state(ShardRange.SHRINKING,
state_timestamp=Timestamp.now())
own_shard_range.epoch = Timestamp.now()
fresh_broker.merge_shard_ranges([own_shard_range])
self.assertEqual(fresh_broker.get_db_state(), 'unsharded')
@with_tempdir
def test_delete_db_does_not_clear_particular_sharding_meta(self, tempdir):
acct = '.sharded_a'
cont = 'c'
hsh = hash_path(acct, cont)
db_file = "%s.db" % hsh
db_path = os.path.join(tempdir, db_file)
ts = Timestamp(0).normal
broker = ContainerBroker(db_path, account=acct, container=cont)
broker.initialize(ts, 0)
# add some metadata but include both types of root path
broker.update_metadata({
'foo': ('bar', ts),
'icecream': ('sandwich', ts),
'X-Container-Sysmeta-Some': ('meta', ts),
'X-Container-Sysmeta-Sharding': ('yes', ts),
'X-Container-Sysmeta-Shard-Quoted-Root': ('a/c', ts),
'X-Container-Sysmeta-Shard-Root': ('a/c', ts)})
self.assertEqual('a/c', broker.root_path)
        # now let's delete the db. All metadata should be cleared except the
        # sharding sysmeta and the shard root paths.
delete_ts = Timestamp(1).normal
broker.delete_db(delete_ts)
# ensure that metadata was cleared except for root paths
def check_metadata(broker):
meta = broker.metadata
self.assertEqual(meta['X-Container-Sysmeta-Some'], ['', delete_ts])
self.assertEqual(meta['icecream'], ['', delete_ts])
self.assertEqual(meta['foo'], ['', delete_ts])
self.assertEqual(meta['X-Container-Sysmeta-Shard-Quoted-Root'],
['a/c', ts])
self.assertEqual(meta['X-Container-Sysmeta-Shard-Root'],
['a/c', ts])
self.assertEqual('a/c', broker.root_path)
self.assertEqual(meta['X-Container-Sysmeta-Sharding'],
['yes', ts])
self.assertFalse(broker.is_root_container())
check_metadata(broker)
# fresh broker in case values were cached in previous instance
broker = ContainerBroker(db_path)
check_metadata(broker)
@with_tempdir
def test_db_file(self, tempdir):
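        # Check which of db_file/fresh_db_file the broker reports via
        # db_file, _db_file and db_files in each sharding state.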
acct = 'account'
        cont = 'container'
hsh = hash_path(acct, cont)
db_file = "%s.db" % hsh
ts_epoch = Timestamp.now()
fresh_db_file = "%s_%s.db" % (hsh, ts_epoch.normal)
db_path = os.path.join(tempdir, db_file)
fresh_db_path = os.path.join(tempdir, fresh_db_file)
ts = Timestamp.now()
# First test NOTFOUND state, this will return the db_file passed
# in the constructor
def check_unfound_db_files(broker, init_db_file):
self.assertEqual(init_db_file, broker.db_file)
self.assertEqual(broker._db_file, db_path)
self.assertFalse(os.path.exists(db_path))
self.assertFalse(os.path.exists(fresh_db_path))
self.assertEqual([], broker.db_files)
broker = ContainerBroker(db_path, account=acct, container=cont)
check_unfound_db_files(broker, db_path)
broker = ContainerBroker(fresh_db_path, account=acct, container=cont)
check_unfound_db_files(broker, fresh_db_path)
# Test UNSHARDED state, that is when db_file exists and fresh_db_file
# doesn't, so it should return the db_path
def check_unsharded_db_files(broker):
self.assertEqual(broker.db_file, db_path)
self.assertEqual(broker._db_file, db_path)
self.assertTrue(os.path.exists(db_path))
self.assertFalse(os.path.exists(fresh_db_path))
self.assertEqual([db_path], broker.db_files)
broker = ContainerBroker(db_path, account=acct, container=cont)
broker.initialize(ts.internal, 0)
check_unsharded_db_files(broker)
broker = ContainerBroker(fresh_db_path, account=acct, container=cont)
check_unsharded_db_files(broker)
        # while UNSHARDED, db_path is still used despite giving fresh_db_path
        # to init, so this broker cannot be initialized again
with self.assertRaises(DatabaseAlreadyExists):
broker.initialize(ts.internal, 0)
# Test the SHARDING state, this is the period when both the db_file and
# the fresh_db_file exist, in this case it should return the
# fresh_db_path.
def check_sharding_db_files(broker):
self.assertEqual(broker.db_file, fresh_db_path)
self.assertEqual(broker._db_file, db_path)
self.assertTrue(os.path.exists(db_path))
self.assertTrue(os.path.exists(fresh_db_path))
self.assertEqual([db_path, fresh_db_path], broker.db_files)
        # Use force_db_file to have fresh_db_path created when initializing
broker = ContainerBroker(fresh_db_path, account=acct,
container=cont, force_db_file=True)
self.assertEqual([db_path], broker.db_files)
broker.initialize(ts.internal, 0)
check_sharding_db_files(broker)
broker = ContainerBroker(db_path, account=acct, container=cont)
check_sharding_db_files(broker)
broker = ContainerBroker(fresh_db_path, account=acct, container=cont)
check_sharding_db_files(broker)
# force_db_file can be used to open db_path specifically
forced_broker = ContainerBroker(db_path, account=acct,
container=cont, force_db_file=True)
self.assertEqual(forced_broker.db_file, db_path)
self.assertEqual(forced_broker._db_file, db_path)
def check_sharded_db_files(broker):
self.assertEqual(broker.db_file, fresh_db_path)
self.assertEqual(broker._db_file, db_path)
self.assertFalse(os.path.exists(db_path))
self.assertTrue(os.path.exists(fresh_db_path))
self.assertEqual([fresh_db_path], broker.db_files)
# Test the SHARDED state, this is when only fresh_db_file exists, so
# obviously this should return the fresh_db_path
os.unlink(db_path)
broker.reload_db_files()
check_sharded_db_files(broker)
broker = ContainerBroker(db_path, account=acct, container=cont)
check_sharded_db_files(broker)
@with_tempdir
def test_sharding_initiated_and_required(self, tempdir):
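        # Check sharding_initiated() and sharding_required() as shard ranges
        # are added and the own shard range moves through its states.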
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', '%s.db' % uuid4())
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(Timestamp.now().internal, 0)
# no shard ranges
self.assertIs(False, broker.sharding_initiated())
self.assertIs(False, broker.sharding_required())
# only own shard range
own_sr = broker.get_own_shard_range()
for state in ShardRange.STATES:
own_sr.update_state(state, state_timestamp=Timestamp.now())
broker.merge_shard_ranges(own_sr)
self.assertIs(False, broker.sharding_initiated())
self.assertIs(False, broker.sharding_required())
# shard ranges, still ACTIVE
own_sr.update_state(ShardRange.ACTIVE,
state_timestamp=Timestamp.now())
broker.merge_shard_ranges(own_sr)
broker.merge_shard_ranges(ShardRange('.shards_a/cc', Timestamp.now()))
self.assertIs(False, broker.sharding_initiated())
self.assertIs(False, broker.sharding_required())
# shard ranges and SHARDING, SHRINKING or SHARDED
broker.enable_sharding(Timestamp.now())
self.assertTrue(broker.set_sharding_state())
self.assertIs(True, broker.sharding_initiated())
self.assertIs(True, broker.sharding_required())
epoch = broker.db_epoch
own_sr.update_state(ShardRange.SHRINKING,
state_timestamp=Timestamp.now())
own_sr.epoch = epoch
broker.merge_shard_ranges(own_sr)
self.assertIs(True, broker.sharding_initiated())
self.assertIs(True, broker.sharding_required())
own_sr.update_state(ShardRange.SHARDED)
broker.merge_shard_ranges(own_sr)
self.assertTrue(broker.set_sharded_state())
self.assertIs(True, broker.sharding_initiated())
self.assertIs(False, broker.sharding_required())
@with_tempdir
def test_put_object_multiple_encoded_timestamps_using_file(self, tempdir):
# Test ContainerBroker.put_object with differing data, content-type
# and metadata timestamps, using file db to ensure that the code paths
# to write/read pending file are exercised.
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
self._test_put_object_multiple_encoded_timestamps(broker)
def _test_put_object_multiple_explicit_timestamps(self, broker):
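        # Shared helper: same scenarios as the encoded-timestamp helper above,
        # but passing ctype_timestamp/meta_timestamp as explicit keyword args.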
ts = make_timestamp_iter()
broker.initialize(next(ts).internal, 0)
t = [next(ts) for _ in range(11)]
# Create initial object
broker.put_object('obj_name', t[0].internal, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe',
ctype_timestamp=None,
meta_timestamp=None)
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t[0].internal, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
# hash and size change with same data timestamp are ignored
t_encoded = encode_timestamps(t[0], t[1], t[1])
broker.put_object('obj_name', t[0].internal, 456,
'application/x-test-2',
'1234567890abcdeffedcba0987654321',
ctype_timestamp=t[1].internal,
meta_timestamp=t[1].internal)
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t_encoded, 123,
'application/x-test-2',
'5af83e3196bf99f440f31f2e1a6c9afe')
# content-type change with same timestamp is ignored
t_encoded = encode_timestamps(t[0], t[1], t[2])
broker.put_object('obj_name', t[0].internal, 456,
'application/x-test-3',
'1234567890abcdeffedcba0987654321',
ctype_timestamp=t[1].internal,
meta_timestamp=t[2].internal)
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t_encoded, 123,
'application/x-test-2',
'5af83e3196bf99f440f31f2e1a6c9afe')
# update with differing newer timestamps
t_encoded = encode_timestamps(t[4], t[6], t[8])
broker.put_object('obj_name', t[4].internal, 789,
'application/x-test-3',
'abcdef1234567890abcdef1234567890',
ctype_timestamp=t[6].internal,
meta_timestamp=t[8].internal)
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t_encoded, 789,
'application/x-test-3',
'abcdef1234567890abcdef1234567890')
# update with differing older timestamps should be ignored
broker.put_object('obj_name', t[3].internal, 9999,
'application/x-test-ignored',
'ignored_hash',
ctype_timestamp=t[5].internal,
meta_timestamp=t[7].internal)
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t_encoded, 789,
'application/x-test-3',
'abcdef1234567890abcdef1234567890')
# content_type_timestamp == None defaults to data timestamp
t_encoded = encode_timestamps(t[9], t[9], t[8])
broker.put_object('obj_name', t[9].internal, 9999,
'application/x-test-new',
'new_hash',
ctype_timestamp=None,
meta_timestamp=t[7].internal)
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t_encoded, 9999,
'application/x-test-new',
'new_hash')
# meta_timestamp == None defaults to data timestamp
t_encoded = encode_timestamps(t[9], t[10], t[10])
broker.put_object('obj_name', t[8].internal, 1111,
'application/x-test-newer',
'older_hash',
ctype_timestamp=t[10].internal,
meta_timestamp=None)
self.assertEqual(1, len(broker.get_items_since(0, 100)))
self._assert_db_row(broker, 'obj_name', t_encoded, 9999,
'application/x-test-newer',
'new_hash')
def test_put_object_multiple_explicit_timestamps_using_memory(self):
# Test ContainerBroker.put_object with differing data, content-type
# and metadata timestamps passed as explicit args
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
self._test_put_object_multiple_explicit_timestamps(broker)
@with_tempdir
def test_put_object_multiple_explicit_timestamps_using_file(self, tempdir):
# Test ContainerBroker.put_object with differing data, content-type
# and metadata timestamps passed as explicit args, using file db to
# ensure that the code paths to write/read pending file are exercised.
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
self._test_put_object_multiple_explicit_timestamps(broker)
def test_last_modified_time(self):
# Test container listing reports the most recent of data or metadata
# timestamp as last-modified time
ts = make_timestamp_iter()
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(next(ts).internal, 0)
# simple 'single' timestamp case
t0 = next(ts)
broker.put_object('obj1', t0.internal, 0, 'text/plain', 'hash1')
listing = broker.list_objects_iter(100, '', None, None, '')
self.assertEqual(len(listing), 1)
self.assertEqual(listing[0][0], 'obj1')
self.assertEqual(listing[0][1], t0.internal)
# content-type and metadata are updated at t1
t1 = next(ts)
t_encoded = encode_timestamps(t0, t1, t1)
broker.put_object('obj1', t_encoded, 0, 'text/plain', 'hash1')
listing = broker.list_objects_iter(100, '', None, None, '')
self.assertEqual(len(listing), 1)
self.assertEqual(listing[0][0], 'obj1')
self.assertEqual(listing[0][1], t1.internal)
# used later
t2 = next(ts)
# metadata is updated at t3
t3 = next(ts)
t_encoded = encode_timestamps(t0, t1, t3)
broker.put_object('obj1', t_encoded, 0, 'text/plain', 'hash1')
listing = broker.list_objects_iter(100, '', None, None, '')
self.assertEqual(len(listing), 1)
self.assertEqual(listing[0][0], 'obj1')
self.assertEqual(listing[0][1], t3.internal)
# all parts updated at t2, last-modified should remain at t3
t_encoded = encode_timestamps(t2, t2, t2)
broker.put_object('obj1', t_encoded, 0, 'text/plain', 'hash1')
listing = broker.list_objects_iter(100, '', None, None, '')
self.assertEqual(len(listing), 1)
self.assertEqual(listing[0][0], 'obj1')
self.assertEqual(listing[0][1], t3.internal)
# all parts updated at t4, last-modified should be t4
t4 = next(ts)
t_encoded = encode_timestamps(t4, t4, t4)
broker.put_object('obj1', t_encoded, 0, 'text/plain', 'hash1')
listing = broker.list_objects_iter(100, '', None, None, '')
self.assertEqual(len(listing), 1)
self.assertEqual(listing[0][0], 'obj1')
self.assertEqual(listing[0][1], t4.internal)
@patch_policies
def test_put_misplaced_object_does_not_effect_container_stats(self):
policy = random.choice(list(POLICIES))
ts = make_timestamp_iter()
broker = ContainerBroker(self.get_db_path(),
account='a', container='c')
broker.initialize(next(ts).internal, policy.idx)
# migration tests may not honor policy on initialize
if isinstance(self, ContainerBrokerMigrationMixin):
real_storage_policy_index = \
broker.get_info()['storage_policy_index']
policy = [p for p in POLICIES
if p.idx == real_storage_policy_index][0]
broker.put_object('correct_o', next(ts).internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe',
storage_policy_index=policy.idx)
info = broker.get_info()
self.assertEqual(1, info['object_count'])
self.assertEqual(123, info['bytes_used'])
other_policy = random.choice([p for p in POLICIES
if p is not policy])
broker.put_object('wrong_o', next(ts).internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe',
storage_policy_index=other_policy.idx)
        # refresh info; the misplaced object must not change container stats
        info = broker.get_info()
        self.assertEqual(1, info['object_count'])
        self.assertEqual(123, info['bytes_used'])
@patch_policies
def test_has_multiple_policies(self):
policy = random.choice(list(POLICIES))
ts = make_timestamp_iter()
broker = ContainerBroker(self.get_db_path(),
account='a', container='c')
broker.initialize(next(ts).internal, policy.idx)
# migration tests may not honor policy on initialize
if isinstance(self, ContainerBrokerMigrationMixin):
real_storage_policy_index = \
broker.get_info()['storage_policy_index']
policy = [p for p in POLICIES
if p.idx == real_storage_policy_index][0]
broker.put_object('correct_o', next(ts).internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe',
storage_policy_index=policy.idx)
# commit pending file into db
broker._commit_puts()
self.assertFalse(broker.has_multiple_policies())
other_policy = [p for p in POLICIES if p is not policy][0]
broker.put_object('wrong_o', next(ts).internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe',
storage_policy_index=other_policy.idx)
broker._commit_puts()
self.assertTrue(broker.has_multiple_policies())
@patch_policies
def test_get_policy_info(self):
policy = random.choice(list(POLICIES))
ts = make_timestamp_iter()
broker = ContainerBroker(self.get_db_path(),
account='a', container='c')
broker.initialize(next(ts).internal, policy.idx)
# migration tests may not honor policy on initialize
if isinstance(self, ContainerBrokerMigrationMixin):
real_storage_policy_index = \
broker.get_info()['storage_policy_index']
policy = [p for p in POLICIES
if p.idx == real_storage_policy_index][0]
policy_stats = broker.get_policy_stats()
expected = {policy.idx: {'bytes_used': 0, 'object_count': 0}}
self.assertEqual(policy_stats, expected)
# add an object
broker.put_object('correct_o', next(ts).internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe',
storage_policy_index=policy.idx)
# commit pending file into db
broker._commit_puts()
policy_stats = broker.get_policy_stats()
expected = {policy.idx: {'bytes_used': 123, 'object_count': 1}}
self.assertEqual(policy_stats, expected)
# add a misplaced object
other_policy = random.choice([p for p in POLICIES
if p is not policy])
broker.put_object('wrong_o', next(ts).internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe',
storage_policy_index=other_policy.idx)
broker._commit_puts()
policy_stats = broker.get_policy_stats()
expected = {
policy.idx: {'bytes_used': 123, 'object_count': 1},
other_policy.idx: {'bytes_used': 123, 'object_count': 1},
}
self.assertEqual(policy_stats, expected)
@patch_policies
def test_policy_stat_tracking(self):
ts = make_timestamp_iter()
broker = ContainerBroker(self.get_db_path(),
account='a', container='c')
# Note: in subclasses of this TestCase that inherit the
# ContainerBrokerMigrationMixin, passing POLICIES.default.idx here has
# no effect and broker.get_policy_stats() returns a dict with a single
# entry mapping policy index 0 to the container stats
broker.initialize(next(ts).internal, POLICIES.default.idx)
stats = defaultdict(dict)
def assert_empty_default_policy_stats(policy_stats):
# if no objects were added for the default policy we still
# expect an entry for the default policy in the returned info
# because the database was initialized with that storage policy
# - but it must be empty.
default_stats = policy_stats[POLICIES.default.idx]
expected = {'object_count': 0, 'bytes_used': 0}
self.assertEqual(default_stats, expected)
policy_stats = broker.get_policy_stats()
assert_empty_default_policy_stats(policy_stats)
iters = 100
for i in range(iters):
policy_index = random.randint(0, iters // 10)
name = 'object-%s' % random.randint(0, iters // 10)
size = random.randint(0, iters)
broker.put_object(name, next(ts).internal, size, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe',
storage_policy_index=policy_index)
# track the size of the latest timestamp put for each object
# in each storage policy
stats[policy_index][name] = size
# commit pending file into db
broker._commit_puts()
policy_stats = broker.get_policy_stats()
if POLICIES.default.idx not in stats:
# unlikely, but check empty default index still in policy stats
assert_empty_default_policy_stats(policy_stats)
policy_stats.pop(POLICIES.default.idx)
self.assertEqual(len(policy_stats), len(stats))
for policy_index, stat in policy_stats.items():
self.assertEqual(stat['object_count'], len(stats[policy_index]))
self.assertEqual(stat['bytes_used'],
sum(stats[policy_index].values()))
def test_initialize_container_broker_in_default(self):
broker = ContainerBroker(self.get_db_path(), account='test1',
container='test2')
# initialize with no storage_policy_index argument
broker.initialize(Timestamp(1).internal)
info = broker.get_info()
self.assertEqual(info['account'], 'test1')
self.assertEqual(info['container'], 'test2')
self.assertEqual(info['hash'], '00000000000000000000000000000000')
self.assertEqual(info['put_timestamp'], Timestamp(1).internal)
self.assertEqual(info['delete_timestamp'], '0')
info = broker.get_info()
self.assertEqual(info['object_count'], 0)
self.assertEqual(info['bytes_used'], 0)
policy_stats = broker.get_policy_stats()
# Act as policy-0
self.assertTrue(0 in policy_stats)
self.assertEqual(policy_stats[0]['bytes_used'], 0)
self.assertEqual(policy_stats[0]['object_count'], 0)
broker.put_object('o1', Timestamp.now().internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 123)
policy_stats = broker.get_policy_stats()
self.assertTrue(0 in policy_stats)
self.assertEqual(policy_stats[0]['object_count'], 1)
self.assertEqual(policy_stats[0]['bytes_used'], 123)
def test_get_info(self):
# Test ContainerBroker.get_info
broker = ContainerBroker(self.get_db_path(), account='test1',
container='test2')
broker.initialize(Timestamp('1').internal, 0)
info = broker.get_info()
self.assertEqual(info['account'], 'test1')
self.assertEqual(info['container'], 'test2')
self.assertEqual(info['hash'], '00000000000000000000000000000000')
self.assertEqual(info['put_timestamp'], Timestamp(1).internal)
self.assertEqual(info['delete_timestamp'], '0')
if self.__class__ in (
TestContainerBrokerBeforeMetadata,
TestContainerBrokerBeforeXSync,
TestContainerBrokerBeforeSPI,
TestContainerBrokerBeforeShardRanges,
TestContainerBrokerBeforeShardRangeReportedColumn,
TestContainerBrokerBeforeShardRangeTombstonesColumn):
self.assertEqual(info['status_changed_at'], '0')
else:
self.assertEqual(info['status_changed_at'],
Timestamp(1).internal)
info = broker.get_info()
self.assertEqual(info['object_count'], 0)
self.assertEqual(info['bytes_used'], 0)
broker.put_object('o1', Timestamp.now().internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 123)
sleep(.00001)
broker.put_object('o2', Timestamp.now().internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEqual(info['object_count'], 2)
self.assertEqual(info['bytes_used'], 246)
sleep(.00001)
broker.put_object('o2', Timestamp.now().internal, 1000,
'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEqual(info['object_count'], 2)
self.assertEqual(info['bytes_used'], 1123)
sleep(.00001)
broker.delete_object('o1', Timestamp.now().internal)
info = broker.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 1000)
sleep(.00001)
broker.delete_object('o2', Timestamp.now().internal)
info = broker.get_info()
self.assertEqual(info['object_count'], 0)
self.assertEqual(info['bytes_used'], 0)
info = broker.get_info()
self.assertEqual(info['x_container_sync_point1'], -1)
self.assertEqual(info['x_container_sync_point2'], -1)
@with_tempdir
def test_get_info_sharding_states(self, tempdir):
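        # get_info() should report the broker's own object stats unless the
        # db is sharded, in which case it aggregates stats from shard ranges
        # via get_shard_usage().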
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'hash.db')
broker = ContainerBroker(
db_path, account='myaccount', container='mycontainer')
broker.initialize(next(self.ts).internal, 0)
broker.put_object('o1', next(self.ts).internal, 123, 'text/plain',
'fake etag')
sr = ShardRange('.shards_a/c', next(self.ts))
broker.merge_shard_ranges(sr)
def check_info(expected):
errors = []
for k, v in expected.items():
if info.get(k) != v:
errors.append((k, v, info.get(k)))
if errors:
self.fail('Mismatches: %s' % ', '.join(
['%s should be %s but got %s' % error
for error in errors]))
# unsharded
with mock.patch.object(
broker, 'get_shard_usage') as mock_get_shard_usage:
info = broker.get_info()
mock_get_shard_usage.assert_not_called()
check_info({'account': 'myaccount',
'container': 'mycontainer',
'object_count': 1,
'bytes_used': 123,
'db_state': 'unsharded'})
# sharding
epoch = next(self.ts)
broker.enable_sharding(epoch)
self.assertTrue(broker.set_sharding_state())
broker.put_object('o2', next(self.ts).internal, 1, 'text/plain',
'fake etag')
broker.put_object('o3', next(self.ts).internal, 320, 'text/plain',
'fake etag')
with mock.patch.object(
broker, 'get_shard_usage') as mock_get_shard_usage:
info = broker.get_info()
mock_get_shard_usage.assert_not_called()
check_info({'account': 'myaccount',
'container': 'mycontainer',
'object_count': 1,
'bytes_used': 123,
'db_state': 'sharding'})
# sharded
self.assertTrue(broker.set_sharded_state())
shard_stats = {'object_count': 1001, 'bytes_used': 3003}
with mock.patch.object(
broker, 'get_shard_usage') as mock_get_shard_usage:
mock_get_shard_usage.return_value = shard_stats
info = broker.get_info()
mock_get_shard_usage.assert_called_once_with()
check_info({'account': 'myaccount',
'container': 'mycontainer',
'object_count': 1001,
'bytes_used': 3003,
'db_state': 'sharded'})
# collapsed
sr.set_deleted(next(self.ts))
broker.merge_shard_ranges(sr)
with mock.patch.object(
broker, 'get_shard_usage') as mock_get_shard_usage:
info = broker.get_info()
mock_get_shard_usage.assert_not_called()
check_info({'account': 'myaccount',
'container': 'mycontainer',
'object_count': 2,
'bytes_used': 321,
'db_state': 'collapsed'})
def test_set_x_syncs(self):
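        # set_x_container_sync_points() should update the sync points that
        # get_info() reports.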
broker = ContainerBroker(self.get_db_path(), account='test1',
container='test2')
broker.initialize(Timestamp('1').internal, 0)
info = broker.get_info()
self.assertEqual(info['x_container_sync_point1'], -1)
self.assertEqual(info['x_container_sync_point2'], -1)
broker.set_x_container_sync_points(1, 2)
info = broker.get_info()
self.assertEqual(info['x_container_sync_point1'], 1)
self.assertEqual(info['x_container_sync_point2'], 2)
def test_get_report_info(self):
broker = ContainerBroker(self.get_db_path(), account='test1',
container='test2')
broker.initialize(Timestamp('1').internal, 0)
info = broker.get_info()
self.assertEqual(info['account'], 'test1')
self.assertEqual(info['container'], 'test2')
self.assertEqual(info['object_count'], 0)
self.assertEqual(info['bytes_used'], 0)
self.assertEqual(info['reported_object_count'], 0)
self.assertEqual(info['reported_bytes_used'], 0)
broker.put_object('o1', Timestamp.now().internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 123)
self.assertEqual(info['reported_object_count'], 0)
self.assertEqual(info['reported_bytes_used'], 0)
sleep(.00001)
broker.put_object('o2', Timestamp.now().internal, 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEqual(info['object_count'], 2)
self.assertEqual(info['bytes_used'], 246)
self.assertEqual(info['reported_object_count'], 0)
self.assertEqual(info['reported_bytes_used'], 0)
sleep(.00001)
broker.put_object('o2', Timestamp.now().internal, 1000,
'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEqual(info['object_count'], 2)
self.assertEqual(info['bytes_used'], 1123)
self.assertEqual(info['reported_object_count'], 0)
self.assertEqual(info['reported_bytes_used'], 0)
put_timestamp = Timestamp.now().internal
sleep(.001)
delete_timestamp = Timestamp.now().internal
broker.reported(put_timestamp, delete_timestamp, 2, 1123)
info = broker.get_info()
self.assertEqual(info['object_count'], 2)
self.assertEqual(info['bytes_used'], 1123)
self.assertEqual(info['reported_put_timestamp'], put_timestamp)
self.assertEqual(info['reported_delete_timestamp'], delete_timestamp)
self.assertEqual(info['reported_object_count'], 2)
self.assertEqual(info['reported_bytes_used'], 1123)
sleep(.00001)
broker.delete_object('o1', Timestamp.now().internal)
info = broker.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 1000)
self.assertEqual(info['reported_object_count'], 2)
self.assertEqual(info['reported_bytes_used'], 1123)
sleep(.00001)
broker.delete_object('o2', Timestamp.now().internal)
info = broker.get_info()
self.assertEqual(info['object_count'], 0)
self.assertEqual(info['bytes_used'], 0)
self.assertEqual(info['reported_object_count'], 2)
self.assertEqual(info['reported_bytes_used'], 1123)
@with_tempdir
def test_get_replication_info(self, tempdir):
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'hash.db')
broker = ContainerBroker(
db_path, account='myaccount', container='mycontainer')
broker.initialize(next(self.ts).internal, 0)
metadata = {'blah': ['val', next(self.ts).internal]}
broker.update_metadata(metadata)
expected = broker.get_info()
expected['metadata'] = json.dumps(metadata)
expected.pop('object_count')
expected['count'] = 0
expected['max_row'] = -1
expected['shard_max_row'] = -1
actual = broker.get_replication_info()
self.assertEqual(expected, actual)
broker.put_object('o1', next(self.ts).internal, 123, 'text/plain',
'fake etag')
expected = broker.get_info()
expected['metadata'] = json.dumps(metadata)
expected.pop('object_count')
expected['count'] = 1
expected['max_row'] = 1
expected['shard_max_row'] = -1
actual = broker.get_replication_info()
self.assertEqual(expected, actual)
sr = ShardRange('.shards_a/c', next(self.ts))
broker.merge_shard_ranges(sr)
expected['shard_max_row'] = 1
actual = broker.get_replication_info()
self.assertEqual(expected, actual)
@with_tempdir
def test_remove_objects(self, tempdir):
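        # remove_objects(lower, upper, max_row=None) should delete object rows
        # whose names fall within the given bounds, optionally limited to rows
        # at or below max_row.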
objects = (('undeleted', Timestamp.now().internal, 0, 'text/plain',
EMPTY_ETAG, 0, 0),
('other_policy', Timestamp.now().internal, 0, 'text/plain',
EMPTY_ETAG, 0, 1),
('deleted', Timestamp.now().internal, 0, 'text/plain',
EMPTY_ETAG, 1, 0))
object_names = [o[0] for o in objects]
def get_rows(broker):
with broker.get() as conn:
cursor = conn.execute("SELECT * FROM object")
return [r[1] for r in cursor]
def do_setup():
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix',
'hash', '%s.db' % uuid4())
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(Timestamp.now().internal, 0)
for obj in objects:
# ensure row order matches put order
broker.put_object(*obj)
broker._commit_puts()
self.assertEqual(3, broker.get_max_row()) # sanity check
self.assertEqual(object_names, get_rows(broker)) # sanity check
return broker
broker = do_setup()
broker.remove_objects('', '')
self.assertFalse(get_rows(broker))
broker = do_setup()
broker.remove_objects('deleted', '')
self.assertEqual([object_names[2]], get_rows(broker))
broker = do_setup()
broker.remove_objects('', 'deleted', max_row=2)
self.assertEqual(object_names, get_rows(broker))
broker = do_setup()
broker.remove_objects('deleted', 'un')
self.assertEqual([object_names[0], object_names[2]], get_rows(broker))
broker = do_setup()
broker.remove_objects('', '', max_row=-1)
self.assertEqual(object_names, get_rows(broker))
broker = do_setup()
broker.remove_objects('', '', max_row=0)
self.assertEqual(object_names, get_rows(broker))
broker = do_setup()
broker.remove_objects('', '', max_row=1)
self.assertEqual(object_names[1:], get_rows(broker))
broker = do_setup()
broker.remove_objects('', '', max_row=2)
self.assertEqual(object_names[2:], get_rows(broker))
broker = do_setup()
broker.remove_objects('', '', max_row=3)
self.assertFalse(get_rows(broker))
broker = do_setup()
broker.remove_objects('', '', max_row=99)
self.assertFalse(get_rows(broker))
def test_get_objects(self):
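        # get_objects() should return object dicts for all storage policies,
        # honoring limit, marker/end_marker and the include_deleted flag
        # (True, False, or None for both deleted and undeleted).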
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
objects_0 = [{'name': 'obj_0_%d' % i,
'created_at': next(self.ts).normal,
'content_type': 'text/plain',
'etag': 'etag_%d' % i,
'size': 1024 * i,
'deleted': i % 2,
'storage_policy_index': 0
} for i in range(1, 8)]
objects_1 = [{'name': 'obj_1_%d' % i,
'created_at': next(self.ts).normal,
'content_type': 'text/plain',
'etag': 'etag_%d' % i,
'size': 1024 * i,
'deleted': i % 2,
'storage_policy_index': 1
} for i in range(1, 8)]
        # merge_items mutates its items, so pass in copies
broker.merge_items([dict(obj) for obj in objects_0 + objects_1])
actual = broker.get_objects()
self.assertEqual(objects_0 + objects_1, actual)
with mock.patch('swift.container.backend.CONTAINER_LISTING_LIMIT', 2):
actual = broker.get_objects()
self.assertEqual(objects_0[:2], actual)
with mock.patch('swift.container.backend.CONTAINER_LISTING_LIMIT', 2):
actual = broker.get_objects(limit=9)
self.assertEqual(objects_0 + objects_1[:2], actual)
actual = broker.get_objects(marker=objects_0[2]['name'])
self.assertEqual(objects_0[3:] + objects_1, actual)
actual = broker.get_objects(end_marker=objects_0[2]['name'])
self.assertEqual(objects_0[:2], actual)
actual = broker.get_objects(include_deleted=True)
self.assertEqual(objects_0[::2] + objects_1[::2], actual)
actual = broker.get_objects(include_deleted=False)
self.assertEqual(objects_0[1::2] + objects_1[1::2], actual)
actual = broker.get_objects(include_deleted=None)
self.assertEqual(objects_0 + objects_1, actual)
def test_get_objects_since_row(self):
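        # since_row should limit results to rows added after that row id;
        # re-putting an object creates a new row, superseding its old one.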
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
obj_names = ['obj%03d' % i for i in range(20)]
timestamps = [next(self.ts) for o in obj_names]
for name, timestamp in zip(obj_names, timestamps):
broker.put_object(name, timestamp.internal,
0, 'text/plain', EMPTY_ETAG)
broker._commit_puts() # ensure predictable row order
timestamps = [next(self.ts) for o in obj_names[10:]]
for name, timestamp in zip(obj_names[10:], timestamps):
broker.put_object(name, timestamp.internal,
0, 'text/plain', EMPTY_ETAG, deleted=1)
broker._commit_puts() # ensure predictable row order
# sanity check
self.assertEqual(30, broker.get_max_row())
actual = broker.get_objects()
self.assertEqual(obj_names, [o['name'] for o in actual])
# all rows included
actual = broker.get_objects(since_row=None)
self.assertEqual(obj_names, [o['name'] for o in actual])
actual = broker.get_objects(since_row=-1)
self.assertEqual(obj_names, [o['name'] for o in actual])
# selected rows
for since_row in range(10):
actual = broker.get_objects(since_row=since_row)
with annotate_failure(since_row):
self.assertEqual(obj_names[since_row:],
[o['name'] for o in actual])
for since_row in range(10, 20):
actual = broker.get_objects(since_row=since_row)
with annotate_failure(since_row):
self.assertEqual(obj_names[10:],
[o['name'] for o in actual])
for since_row in range(20, len(obj_names) + 1):
actual = broker.get_objects(since_row=since_row)
with annotate_failure(since_row):
self.assertEqual(obj_names[since_row - 10:],
[o['name'] for o in actual])
self.assertFalse(broker.get_objects(end_marker=obj_names[5],
since_row=5))
def test_list_objects_iter(self):
# Test ContainerBroker.list_objects_iter
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
for obj1 in range(4):
for obj2 in range(125):
broker.put_object('%d/%04d' % (obj1, obj2),
Timestamp.now().internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
for obj in range(125):
broker.put_object('2/0051/%04d' % obj,
Timestamp.now().internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
for obj in range(125):
broker.put_object('3/%04d/0049' % obj,
Timestamp.now().internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(100, '', None, None, '')
self.assertEqual(len(listing), 100)
self.assertEqual(listing[0][0], '0/0000')
self.assertEqual(listing[-1][0], '0/0099')
listing = broker.list_objects_iter(100, '', '0/0050', None, '')
self.assertEqual(len(listing), 50)
self.assertEqual(listing[0][0], '0/0000')
self.assertEqual(listing[-1][0], '0/0049')
listing = broker.list_objects_iter(100, '0/0099', None, None, '')
self.assertEqual(len(listing), 100)
self.assertEqual(listing[0][0], '0/0100')
self.assertEqual(listing[-1][0], '1/0074')
listing = broker.list_objects_iter(55, '1/0074', None, None, '')
self.assertEqual(len(listing), 55)
self.assertEqual(listing[0][0], '1/0075')
self.assertEqual(listing[-1][0], '2/0004')
listing = broker.list_objects_iter(55, '2/0005', None, None, '',
reverse=True)
self.assertEqual(len(listing), 55)
self.assertEqual(listing[0][0], '2/0004')
self.assertEqual(listing[-1][0], '1/0075')
listing = broker.list_objects_iter(10, '', None, '0/01', '')
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '0/0100')
self.assertEqual(listing[-1][0], '0/0109')
listing = broker.list_objects_iter(10, '', None, '0/', '/')
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '0/0000')
self.assertEqual(listing[-1][0], '0/0009')
listing = broker.list_objects_iter(10, '', None, '0/', '/',
reverse=True)
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '0/0124')
self.assertEqual(listing[-1][0], '0/0115')
# Same as above, but using the path argument.
listing = broker.list_objects_iter(10, '', None, None, '', '0')
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '0/0000')
self.assertEqual(listing[-1][0], '0/0009')
listing = broker.list_objects_iter(10, '', None, None, '', '0',
reverse=True)
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '0/0124')
self.assertEqual(listing[-1][0], '0/0115')
listing = broker.list_objects_iter(10, '', None, '', '/')
self.assertEqual(len(listing), 4)
self.assertEqual([row[0] for row in listing],
['0/', '1/', '2/', '3/'])
listing = broker.list_objects_iter(10, '', None, '', '/', reverse=True)
self.assertEqual(len(listing), 4)
self.assertEqual([row[0] for row in listing],
['3/', '2/', '1/', '0/'])
listing = broker.list_objects_iter(10, '2', None, None, '/')
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['2/', '3/'])
listing = broker.list_objects_iter(10, '2/', None, None, '/')
self.assertEqual(len(listing), 1)
self.assertEqual([row[0] for row in listing], ['3/'])
listing = broker.list_objects_iter(10, '2/', None, None, '/',
reverse=True)
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['1/', '0/'])
listing = broker.list_objects_iter(10, '20', None, None, '/',
reverse=True)
self.assertEqual(len(listing), 3)
self.assertEqual([row[0] for row in listing], ['2/', '1/', '0/'])
listing = broker.list_objects_iter(10, '2/0050', None, '2/', '/')
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '2/0051')
self.assertEqual(listing[1][0], '2/0051/')
self.assertEqual(listing[2][0], '2/0052')
self.assertEqual(listing[-1][0], '2/0059')
listing = broker.list_objects_iter(10, '3/0045', None, '3/', '/')
self.assertEqual(len(listing), 10)
self.assertEqual([row[0] for row in listing],
['3/0045/', '3/0046', '3/0046/', '3/0047',
'3/0047/', '3/0048', '3/0048/', '3/0049',
'3/0049/', '3/0050'])
broker.put_object('3/0049/', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(10, '3/0048', None, None, None)
self.assertEqual(len(listing), 10)
self.assertEqual(
[row[0] for row in listing],
['3/0048/0049', '3/0049', '3/0049/',
'3/0049/0049', '3/0050', '3/0050/0049', '3/0051', '3/0051/0049',
'3/0052', '3/0052/0049'])
listing = broker.list_objects_iter(10, '3/0048', None, '3/', '/')
self.assertEqual(len(listing), 10)
self.assertEqual(
[row[0] for row in listing],
['3/0048/', '3/0049', '3/0049/', '3/0050',
'3/0050/', '3/0051', '3/0051/', '3/0052', '3/0052/', '3/0053'])
listing = broker.list_objects_iter(10, None, None, '3/0049/', '/')
self.assertEqual(len(listing), 2)
self.assertEqual(
[row[0] for row in listing],
['3/0049/', '3/0049/0049'])
listing = broker.list_objects_iter(10, None, None, None, None,
'3/0049')
self.assertEqual(len(listing), 1)
self.assertEqual([row[0] for row in listing], ['3/0049/0049'])
listing = broker.list_objects_iter(2, None, None, '3/', '/')
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['3/0000', '3/0000/'])
listing = broker.list_objects_iter(2, None, None, None, None, '3')
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['3/0000', '3/0001'])
def test_list_objects_iter_with_reserved_name(self):
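        # Objects in the reserved namespace should only be listed when
        # allow_reserved=True.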
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(next(self.ts).internal, 0)
broker.put_object(
'foo', next(self.ts).internal, 0, 0, 0, POLICIES.default.idx)
broker.put_object(
get_reserved_name('foo'), next(self.ts).internal, 0, 0, 0,
POLICIES.default.idx)
listing = broker.list_objects_iter(100, None, None, '', '')
self.assertEqual([row[0] for row in listing], ['foo'])
listing = broker.list_objects_iter(100, None, None, '', '',
reverse=True)
self.assertEqual([row[0] for row in listing], ['foo'])
listing = broker.list_objects_iter(100, None, None, '', '',
allow_reserved=True)
self.assertEqual([row[0] for row in listing],
[get_reserved_name('foo'), 'foo'])
listing = broker.list_objects_iter(100, None, None, '', '',
reverse=True, allow_reserved=True)
self.assertEqual([row[0] for row in listing],
['foo', get_reserved_name('foo')])
def test_reverse_prefix_delim(self):
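        # Data-driven checks of prefix/delimiter/path listings, forwards and
        # in reverse.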
expectations = [
{
'objects': [
'topdir1/subdir1.0/obj1',
'topdir1/subdir1.1/obj1',
'topdir1/subdir1/obj1',
],
'params': {
'prefix': 'topdir1/',
'delimiter': '/',
},
'expected': [
'topdir1/subdir1.0/',
'topdir1/subdir1.1/',
'topdir1/subdir1/',
],
},
{
'objects': [
'topdir1/subdir1.0/obj1',
'topdir1/subdir1.1/obj1',
'topdir1/subdir1/obj1',
'topdir1/subdir10',
'topdir1/subdir10/obj1',
],
'params': {
'prefix': 'topdir1/',
'delimiter': '/',
},
'expected': [
'topdir1/subdir1.0/',
'topdir1/subdir1.1/',
'topdir1/subdir1/',
'topdir1/subdir10',
'topdir1/subdir10/',
],
},
{
'objects': [
'topdir1/subdir1/obj1',
'topdir1/subdir1.0/obj1',
'topdir1/subdir1.1/obj1',
],
'params': {
'prefix': 'topdir1/',
'delimiter': '/',
'reverse': True,
},
'expected': [
'topdir1/subdir1/',
'topdir1/subdir1.1/',
'topdir1/subdir1.0/',
],
},
{
'objects': [
'topdir1/subdir10/obj1',
'topdir1/subdir10',
'topdir1/subdir1/obj1',
'topdir1/subdir1.0/obj1',
'topdir1/subdir1.1/obj1',
],
'params': {
'prefix': 'topdir1/',
'delimiter': '/',
'reverse': True,
},
'expected': [
'topdir1/subdir10/',
'topdir1/subdir10',
'topdir1/subdir1/',
'topdir1/subdir1.1/',
'topdir1/subdir1.0/',
],
},
{
'objects': [
'1',
'2',
'3/1',
'3/2.2',
'3/2/1',
'3/2/2',
'3/3',
'4',
],
'params': {
'path': '3/',
},
'expected': [
'3/1',
'3/2.2',
'3/3',
],
},
{
'objects': [
'1',
'2',
'3/1',
'3/2.2',
'3/2/1',
'3/2/2',
'3/3',
'4',
],
'params': {
'path': '3/',
'reverse': True,
},
'expected': [
'3/3',
'3/2.2',
'3/1',
],
},
]
ts = make_timestamp_iter()
default_listing_params = {
'limit': 10000,
'marker': '',
'end_marker': None,
'prefix': None,
'delimiter': None,
}
obj_create_params = {
'size': 0,
'content_type': 'application/test',
'etag': EMPTY_ETAG,
}
failures = []
for expected in expectations:
broker = ContainerBroker(self.get_db_path(),
account='a', container='c')
broker.initialize(next(ts).internal, 0)
for name in expected['objects']:
broker.put_object(name, next(ts).internal, **obj_create_params)
# commit pending file into db
broker._commit_puts()
params = default_listing_params.copy()
params.update(expected['params'])
listing = list(o[0] for o in broker.list_objects_iter(**params))
if listing != expected['expected']:
expected['listing'] = listing
failures.append(
"With objects %(objects)r, the params %(params)r "
"produced %(listing)r instead of %(expected)r" % expected)
self.assertFalse(failures, "Found the following failures:\n%s" %
'\n'.join(failures))
def test_list_objects_iter_non_slash(self):
# Test ContainerBroker.list_objects_iter using a
# delimiter that is not a slash
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
for obj1 in range(4):
for obj2 in range(125):
broker.put_object('%d:%04d' % (obj1, obj2),
Timestamp.now().internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
for obj in range(125):
broker.put_object('2:0051:%04d' % obj,
Timestamp.now().internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
for obj in range(125):
broker.put_object('3:%04d:0049' % obj,
Timestamp.now().internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(100, '', None, None, '')
self.assertEqual(len(listing), 100)
self.assertEqual(listing[0][0], '0:0000')
self.assertEqual(listing[-1][0], '0:0099')
listing = broker.list_objects_iter(100, '', '0:0050', None, '')
self.assertEqual(len(listing), 50)
self.assertEqual(listing[0][0], '0:0000')
self.assertEqual(listing[-1][0], '0:0049')
listing = broker.list_objects_iter(100, '0:0099', None, None, '')
self.assertEqual(len(listing), 100)
self.assertEqual(listing[0][0], '0:0100')
self.assertEqual(listing[-1][0], '1:0074')
listing = broker.list_objects_iter(55, '1:0074', None, None, '')
self.assertEqual(len(listing), 55)
self.assertEqual(listing[0][0], '1:0075')
self.assertEqual(listing[-1][0], '2:0004')
listing = broker.list_objects_iter(10, '', None, '0:01', '')
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '0:0100')
self.assertEqual(listing[-1][0], '0:0109')
listing = broker.list_objects_iter(10, '', None, '0:', ':')
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '0:0000')
self.assertEqual(listing[-1][0], '0:0009')
# Same as above, but using the path argument, so nothing should be
# returned since path uses a '/' as a delimiter.
listing = broker.list_objects_iter(10, '', None, None, '', '0')
self.assertEqual(len(listing), 0)
listing = broker.list_objects_iter(10, '', None, '', ':')
self.assertEqual(len(listing), 4)
self.assertEqual([row[0] for row in listing],
['0:', '1:', '2:', '3:'])
listing = broker.list_objects_iter(10, '2', None, None, ':')
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['2:', '3:'])
listing = broker.list_objects_iter(10, '2:', None, None, ':')
self.assertEqual(len(listing), 1)
self.assertEqual([row[0] for row in listing], ['3:'])
listing = broker.list_objects_iter(10, '2:0050', None, '2:', ':')
self.assertEqual(len(listing), 10)
self.assertEqual(listing[0][0], '2:0051')
self.assertEqual(listing[1][0], '2:0051:')
self.assertEqual(listing[2][0], '2:0052')
self.assertEqual(listing[-1][0], '2:0059')
listing = broker.list_objects_iter(10, '3:0045', None, '3:', ':')
self.assertEqual(len(listing), 10)
self.assertEqual([row[0] for row in listing],
['3:0045:', '3:0046', '3:0046:', '3:0047',
'3:0047:', '3:0048', '3:0048:', '3:0049',
'3:0049:', '3:0050'])
broker.put_object('3:0049:', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(10, '3:0048', None, None, None)
self.assertEqual(len(listing), 10)
self.assertEqual(
[row[0] for row in listing],
['3:0048:0049', '3:0049', '3:0049:',
'3:0049:0049', '3:0050', '3:0050:0049', '3:0051', '3:0051:0049',
'3:0052', '3:0052:0049'])
listing = broker.list_objects_iter(10, '3:0048', None, '3:', ':')
self.assertEqual(len(listing), 10)
self.assertEqual(
[row[0] for row in listing],
['3:0048:', '3:0049', '3:0049:', '3:0050',
'3:0050:', '3:0051', '3:0051:', '3:0052', '3:0052:', '3:0053'])
listing = broker.list_objects_iter(10, None, None, '3:0049:', ':')
self.assertEqual(len(listing), 2)
self.assertEqual(
[row[0] for row in listing],
['3:0049:', '3:0049:0049'])
# Same as above, but using the path argument, so nothing should be
# returned since path uses a '/' as a delimiter.
listing = broker.list_objects_iter(10, None, None, None, None,
'3:0049')
self.assertEqual(len(listing), 0)
listing = broker.list_objects_iter(2, None, None, '3:', ':')
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['3:0000', '3:0000:'])
listing = broker.list_objects_iter(2, None, None, None, None, '3')
self.assertEqual(len(listing), 0)
def test_list_objects_iter_prefix_delim(self):
# Test ContainerBroker.list_objects_iter
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
broker.put_object(
'/pets/dogs/1', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'/pets/dogs/2', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'/pets/fish/a', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'/pets/fish/b', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'/pets/fish_info.txt', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'/snakes', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
# def list_objects_iter(self, limit, marker, end_marker, prefix,
#                       delimiter, path=None, ...):
listing = broker.list_objects_iter(100, None, None, '/pets/f', '/')
self.assertEqual([row[0] for row in listing],
['/pets/fish/', '/pets/fish_info.txt'])
listing = broker.list_objects_iter(100, None, None, '/pets/fish', '/')
self.assertEqual([row[0] for row in listing],
['/pets/fish/', '/pets/fish_info.txt'])
listing = broker.list_objects_iter(100, None, None, '/pets/fish/', '/')
self.assertEqual([row[0] for row in listing],
['/pets/fish/a', '/pets/fish/b'])
listing = broker.list_objects_iter(100, None, None, None, '/')
self.assertEqual([row[0] for row in listing],
['/'])
def test_list_objects_iter_order_and_reverse(self):
# Test ContainerBroker.list_objects_iter
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
broker.put_object(
'o1', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'o10', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'O1', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'o2', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'o3', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object(
'O4', Timestamp(0).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(100, None, None, '', '',
reverse=False)
self.assertEqual([row[0] for row in listing],
['O1', 'O4', 'o1', 'o10', 'o2', 'o3'])
listing = broker.list_objects_iter(100, None, None, '', '',
reverse=True)
self.assertEqual([row[0] for row in listing],
['o3', 'o2', 'o10', 'o1', 'O4', 'O1'])
listing = broker.list_objects_iter(2, None, None, '', '',
reverse=True)
self.assertEqual([row[0] for row in listing],
['o3', 'o2'])
listing = broker.list_objects_iter(100, 'o2', 'O4', '', '',
reverse=True)
self.assertEqual([row[0] for row in listing],
['o10', 'o1'])
def test_double_check_trailing_delimiter(self):
# Test ContainerBroker.list_objects_iter for a
# container that has an odd file with a trailing delimiter
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
broker.put_object('a', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/a', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/a/a', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/a/b', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/b', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b/a', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b/b', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('c', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/0', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0/', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('00', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0/0', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0/00', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0/1', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0/1/', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0/1/0', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('1', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('1/', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('1/0', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(25, None, None, None, None)
self.assertEqual(len(listing), 22)
self.assertEqual(
[row[0] for row in listing],
['0', '0/', '0/0', '0/00', '0/1', '0/1/', '0/1/0', '00', '1', '1/',
'1/0', 'a', 'a/', 'a/0', 'a/a', 'a/a/a', 'a/a/b', 'a/b', 'b',
'b/a', 'b/b', 'c'])
listing = broker.list_objects_iter(25, None, None, '', '/')
self.assertEqual(len(listing), 10)
self.assertEqual(
[row[0] for row in listing],
['0', '0/', '00', '1', '1/', 'a', 'a/', 'b', 'b/', 'c'])
listing = broker.list_objects_iter(25, None, None, 'a/', '/')
self.assertEqual(len(listing), 5)
self.assertEqual(
[row[0] for row in listing],
['a/', 'a/0', 'a/a', 'a/a/', 'a/b'])
listing = broker.list_objects_iter(25, None, None, '0/', '/')
self.assertEqual(len(listing), 5)
self.assertEqual(
[row[0] for row in listing],
['0/', '0/0', '0/00', '0/1', '0/1/'])
listing = broker.list_objects_iter(25, None, None, '0/1/', '/')
self.assertEqual(len(listing), 2)
self.assertEqual(
[row[0] for row in listing],
['0/1/', '0/1/0'])
listing = broker.list_objects_iter(25, None, None, 'b/', '/')
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['b/a', 'b/b'])
def test_double_check_trailing_delimiter_non_slash(self):
# Test ContainerBroker.list_objects_iter for a
# container that has an odd file with a trailing delimiter
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
broker.put_object('a', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a:', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a:a', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a:a:a', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a:a:b', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a:b', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b:a', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b:b', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('c', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a:0', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0:', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('00', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0:0', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0:00', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0:1', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0:1:', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('0:1:0', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('1', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('1:', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('1:0', Timestamp.now().internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(25, None, None, None, None)
self.assertEqual(len(listing), 22)
self.assertEqual(
[row[0] for row in listing],
['0', '00', '0:', '0:0', '0:00', '0:1', '0:1:', '0:1:0', '1', '1:',
'1:0', 'a', 'a:', 'a:0', 'a:a', 'a:a:a', 'a:a:b', 'a:b', 'b',
'b:a', 'b:b', 'c'])
listing = broker.list_objects_iter(25, None, None, '', ':')
self.assertEqual(len(listing), 10)
self.assertEqual(
[row[0] for row in listing],
['0', '00', '0:', '1', '1:', 'a', 'a:', 'b', 'b:', 'c'])
listing = broker.list_objects_iter(25, None, None, 'a:', ':')
self.assertEqual(len(listing), 5)
self.assertEqual(
[row[0] for row in listing],
['a:', 'a:0', 'a:a', 'a:a:', 'a:b'])
listing = broker.list_objects_iter(25, None, None, '0:', ':')
self.assertEqual(len(listing), 5)
self.assertEqual(
[row[0] for row in listing],
['0:', '0:0', '0:00', '0:1', '0:1:'])
listing = broker.list_objects_iter(25, None, None, '0:1:', ':')
self.assertEqual(len(listing), 2)
self.assertEqual(
[row[0] for row in listing],
['0:1:', '0:1:0'])
listing = broker.list_objects_iter(25, None, None, 'b:', ':')
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['b:a', 'b:b'])
def test_chexor(self):
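# The container hash should be the XOR of md5('<name>-<created_at>')
# over all object rows, so overwriting an object with a newer
# timestamp should change the reported hash accordingly.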
def md5_str(s):
if not isinstance(s, bytes):
s = s.encode('utf8')
return md5(s, usedforsecurity=False).hexdigest()
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
broker.put_object('a', Timestamp(1).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b', Timestamp(2).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
hasha = md5_str('%s-%s' % ('a', Timestamp(1).internal))
hashb = md5_str('%s-%s' % ('b', Timestamp(2).internal))
hashc = '%032x' % (int(hasha, 16) ^ int(hashb, 16))
self.assertEqual(broker.get_info()['hash'], hashc)
broker.put_object('b', Timestamp(3).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
hashb = md5_str('%s-%s' % ('b', Timestamp(3).internal))
hashc = '%032x' % (int(hasha, 16) ^ int(hashb, 16))
self.assertEqual(broker.get_info()['hash'], hashc)
@with_tempdir
def test_newid(self, tempdir):
# test DatabaseBroker.newid
db_path = os.path.join(
tempdir, "d1234", 'contianers', 'part', 'suffix', 'hsh')
os.makedirs(db_path)
broker = ContainerBroker(os.path.join(db_path, 'my.db'),
account='a', container='c')
broker.initialize(Timestamp('1').internal, 0)
id = broker.get_info()['id']
broker.newid('someid')
self.assertNotEqual(id, broker.get_info()['id'])
# ends in the device name (from the path) unless it's an old
# container with just a uuid4 (tested in legacy broker
# tests e.g. *BeforeMetaData)
if len(id) > 36:
self.assertTrue(id.endswith('d1234'))
# But the newid'ed version will now have the device
self.assertTrue(broker.get_info()['id'].endswith('d1234'))
# if we move the broker (happens after an rsync)
new_db_path = os.path.join(
tempdir, "d5678", 'containers', 'part', 'suffix', 'hsh')
os.makedirs(new_db_path)
shutil.copy(os.path.join(db_path, 'my.db'),
os.path.join(new_db_path, 'my.db'))
new_broker = ContainerBroker(os.path.join(new_db_path, 'my.db'),
account='a', container='c')
new_broker.newid(id)
# ends in the device name (from the path)
self.assertFalse(new_broker.get_info()['id'].endswith('d1234'))
self.assertTrue(new_broker.get_info()['id'].endswith('d5678'))
def test_get_items_since(self):
# test DatabaseBroker.get_items_since
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
broker.put_object('a', Timestamp(1).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
max_row = broker.get_replication_info()['max_row']
broker.put_object('b', Timestamp(2).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
items = broker.get_items_since(max_row, 1000)
self.assertEqual(len(items), 1)
self.assertEqual(items[0]['name'], 'b')
def test_sync_merging(self):
# exercise the DatabaseBroker sync functions a bit
broker1 = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker1.initialize(Timestamp('1').internal, 0)
broker2 = ContainerBroker(self.get_db_path(),
account='a', container='c')
broker2.initialize(Timestamp('1').internal, 0)
self.assertEqual(broker2.get_sync('12345'), -1)
broker1.merge_syncs([{'sync_point': 3, 'remote_id': '12345'}])
broker2.merge_syncs(broker1.get_syncs())
self.assertEqual(broker2.get_sync('12345'), 3)
def test_merge_items(self):
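# Replicate object rows from broker1 into broker2 via
# get_items_since()/merge_items(), using the sync point so that only
# new rows are fetched on the second pass.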
broker1 = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker1.initialize(Timestamp('1').internal, 0)
broker2 = ContainerBroker(self.get_db_path(),
account='a', container='c')
broker2.initialize(Timestamp('1').internal, 0)
broker1.put_object('a', Timestamp(1).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1.put_object('b', Timestamp(2).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
# commit pending file into db
broker1._commit_puts()
id = broker1.get_info()['id']
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEqual(len(items), 2)
self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items]))
broker1.put_object('c', Timestamp(3).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1._commit_puts()
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEqual(len(items), 3)
self.assertEqual(['a', 'b', 'c'],
sorted([rec['name'] for rec in items]))
@with_tempdir
def test_merge_items_is_green(self, tempdir):
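# get_info() triggers a pending-file commit; the merge work should be
# dispatched via tpool.execute so it does not block the event loop.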
ts = make_timestamp_iter()
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(ts).internal, 1)
broker.put_object('b', next(ts).internal, 0, 'text/plain',
EMPTY_ETAG)
with mock.patch('swift.container.backend.tpool') as mock_tpool:
broker.get_info()
mock_tpool.execute.assert_called_once()
def test_merge_items_overwrite_unicode(self):
# test DatabaseBroker.merge_items
snowman = u'\N{SNOWMAN}'
if six.PY2:
snowman = snowman.encode('utf-8')
broker1 = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker1.initialize(Timestamp('1').internal, 0)
id = broker1.get_info()['id']
broker2 = ContainerBroker(self.get_db_path(),
account='a', container='c')
broker2.initialize(Timestamp('1').internal, 0)
broker1.put_object(snowman, Timestamp(2).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1.put_object('b', Timestamp(3).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
# commit pending file into db
broker1._commit_puts()
broker2.merge_items(json.loads(json.dumps(broker1.get_items_since(
broker2.get_sync(id), 1000))), id)
broker1.put_object(snowman, Timestamp(4).internal, 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
broker1._commit_puts()
broker2.merge_items(json.loads(json.dumps(broker1.get_items_since(
broker2.get_sync(id), 1000))), id)
items = broker2.get_items_since(-1, 1000)
self.assertEqual(['b', snowman],
sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == snowman:
self.assertEqual(rec['created_at'], Timestamp(4).internal)
if rec['name'] == 'b':
self.assertEqual(rec['created_at'], Timestamp(3).internal)
def test_merge_items_overwrite(self):
# test DatabaseBroker.merge_items
broker1 = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker1.initialize(Timestamp('1').internal, 0)
id = broker1.get_info()['id']
broker2 = ContainerBroker(self.get_db_path(),
account='a', container='c')
broker2.initialize(Timestamp('1').internal, 0)
broker1.put_object('a', Timestamp(2).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1.put_object('b', Timestamp(3).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
# commit pending file into db
broker1._commit_puts()
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
broker1.put_object('a', Timestamp(4).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1._commit_puts()
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEqual(rec['created_at'], Timestamp(4).internal)
if rec['name'] == 'b':
self.assertEqual(rec['created_at'], Timestamp(3).internal)
def test_merge_items_post_overwrite_out_of_order(self):
# test DatabaseBroker.merge_items
broker1 = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker1.initialize(Timestamp('1').internal, 0)
id = broker1.get_info()['id']
broker2 = ContainerBroker(self.get_db_path(),
account='a', container='c')
broker2.initialize(Timestamp('1').internal, 0)
broker1.put_object('a', Timestamp(2).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1.put_object('b', Timestamp(3).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
# commit pending file into db
broker1._commit_puts()
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
broker1.put_object('a', Timestamp(4).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1._commit_puts()
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEqual(rec['created_at'], Timestamp(4).internal)
if rec['name'] == 'b':
self.assertEqual(rec['created_at'], Timestamp(3).internal)
self.assertEqual(rec['content_type'], 'text/plain')
items = broker2.get_items_since(-1, 1000)
self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEqual(rec['created_at'], Timestamp(4).internal)
if rec['name'] == 'b':
self.assertEqual(rec['created_at'], Timestamp(3).internal)
broker1.put_object('b', Timestamp(5).internal, 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1._commit_puts()
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEqual(rec['created_at'], Timestamp(4).internal)
if rec['name'] == 'b':
self.assertEqual(rec['created_at'], Timestamp(5).internal)
self.assertEqual(rec['content_type'], 'text/plain')
def test_set_storage_policy_index(self):
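# Changing the storage policy index should update get_info() and add a
# zeroed row to the per-policy stats; setting the same index again is
# idempotent and leaves status_changed_at untouched.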
ts = make_timestamp_iter()
broker = ContainerBroker(self.get_db_path(),
account='test_account',
container='test_container')
timestamp = next(ts)
broker.initialize(timestamp.internal, 0)
info = broker.get_info()
self.assertEqual(0, info['storage_policy_index']) # sanity check
self.assertEqual(0, info['object_count'])
self.assertEqual(0, info['bytes_used'])
if self.__class__ in (
TestContainerBrokerBeforeMetadata,
TestContainerBrokerBeforeXSync,
TestContainerBrokerBeforeSPI,
TestContainerBrokerBeforeShardRanges,
TestContainerBrokerBeforeShardRangeReportedColumn,
TestContainerBrokerBeforeShardRangeTombstonesColumn):
self.assertEqual(info['status_changed_at'], '0')
else:
self.assertEqual(timestamp.internal, info['status_changed_at'])
expected = {0: {'object_count': 0, 'bytes_used': 0}}
self.assertEqual(expected, broker.get_policy_stats())
timestamp = next(ts)
broker.set_storage_policy_index(111, timestamp.internal)
self.assertEqual(broker.storage_policy_index, 111)
info = broker.get_info()
self.assertEqual(111, info['storage_policy_index'])
self.assertEqual(0, info['object_count'])
self.assertEqual(0, info['bytes_used'])
self.assertEqual(timestamp.internal, info['status_changed_at'])
expected[111] = {'object_count': 0, 'bytes_used': 0}
self.assertEqual(expected, broker.get_policy_stats())
timestamp = next(ts)
broker.set_storage_policy_index(222, timestamp.internal)
self.assertEqual(broker.storage_policy_index, 222)
info = broker.get_info()
self.assertEqual(222, info['storage_policy_index'])
self.assertEqual(0, info['object_count'])
self.assertEqual(0, info['bytes_used'])
self.assertEqual(timestamp.internal, info['status_changed_at'])
expected[222] = {'object_count': 0, 'bytes_used': 0}
self.assertEqual(expected, broker.get_policy_stats())
old_timestamp, timestamp = timestamp, next(ts)
# setting again is idempotent
broker.set_storage_policy_index(222, timestamp.internal)
info = broker.get_info()
self.assertEqual(222, info['storage_policy_index'])
self.assertEqual(0, info['object_count'])
self.assertEqual(0, info['bytes_used'])
self.assertEqual(old_timestamp.internal, info['status_changed_at'])
self.assertEqual(expected, broker.get_policy_stats())
def test_set_storage_policy_index_empty(self):
# Putting an object may trigger migrations, so test with a
# never-had-an-object container to make sure we handle it
broker = ContainerBroker(self.get_db_path(),
account='test_account',
container='test_container')
broker.initialize(Timestamp('1').internal, 0)
info = broker.get_info()
self.assertEqual(0, info['storage_policy_index'])
broker.set_storage_policy_index(2)
info = broker.get_info()
self.assertEqual(2, info['storage_policy_index'])
def test_reconciler_sync(self):
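# get_reconciler_sync() should default to -1 until
# update_reconciler_sync() records a sync point.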
broker = ContainerBroker(self.get_db_path(),
account='test_account',
container='test_container')
broker.initialize(Timestamp('1').internal, 0)
self.assertEqual(-1, broker.get_reconciler_sync())
broker.update_reconciler_sync(10)
self.assertEqual(10, broker.get_reconciler_sync())
@with_tempdir
def test_legacy_pending_files(self, tempdir):
ts = make_timestamp_iter()
db_path = os.path.join(tempdir, 'container.db')
# first init a container DB
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(ts).internal, 1)
# manually make some pending entries lacking storage_policy_index
with open(broker.pending_file, 'a+b') as fp:
for i in range(10):
name, timestamp, size, content_type, etag, deleted = (
'o%s' % i, next(ts).internal, 0, 'c', 'e', 0)
fp.write(b':')
fp.write(base64.b64encode(pickle.dumps(
(name, timestamp, size, content_type, etag, deleted),
protocol=2)))
fp.flush()
# use put_object to append some more entries with different
# values for storage_policy_index
for i in range(10, 30):
name = 'o%s' % i
if i < 20:
size = 1
storage_policy_index = 0
else:
size = 2
storage_policy_index = 1
broker.put_object(name, next(ts).internal, size, 'c', 'e', 0,
storage_policy_index=storage_policy_index)
broker._commit_puts_stale_ok()
# 10 objects with 0 bytes each in the legacy pending entries
# 10 objects with 1 byte each in storage policy 0
# 10 objects with 2 bytes each in storage policy 1
expected = {
0: {'object_count': 20, 'bytes_used': 10},
1: {'object_count': 10, 'bytes_used': 20},
}
self.assertEqual(broker.get_policy_stats(), expected)
@with_tempdir
def test_get_info_no_stale_reads(self, tempdir):
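# With stale_reads_ok=False, a failure to flush the pending file
# should propagate out of get_info().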
ts = make_timestamp_iter()
db_path = os.path.join(tempdir, 'container.db')
def mock_commit_puts():
raise sqlite3.OperationalError('unable to open database file')
broker = ContainerBroker(db_path, account='a', container='c',
stale_reads_ok=False)
broker.initialize(next(ts).internal, 1)
# manually make some pending entries
with open(broker.pending_file, 'a+b') as fp:
for i in range(10):
name, timestamp, size, content_type, etag, deleted = (
'o%s' % i, next(ts).internal, 0, 'c', 'e', 0)
fp.write(b':')
fp.write(base64.b64encode(pickle.dumps(
(name, timestamp, size, content_type, etag, deleted),
protocol=2)))
fp.flush()
broker._commit_puts = mock_commit_puts
with self.assertRaises(sqlite3.OperationalError) as exc_context:
broker.get_info()
self.assertIn('unable to open database file',
str(exc_context.exception))
@with_tempdir
def test_get_info_stale_read_ok(self, tempdir):
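# With stale_reads_ok=True, the same pending-file failure should be
# tolerated and get_info() should still return.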
ts = make_timestamp_iter()
db_path = os.path.join(tempdir, 'container.db')
def mock_commit_puts():
raise sqlite3.OperationalError('unable to open database file')
broker = ContainerBroker(db_path, account='a', container='c',
stale_reads_ok=True)
broker.initialize(next(ts).internal, 1)
# manually make some pending entries
with open(broker.pending_file, 'a+b') as fp:
for i in range(10):
name, timestamp, size, content_type, etag, deleted = (
'o%s' % i, next(ts).internal, 0, 'c', 'e', 0)
fp.write(b':')
fp.write(base64.b64encode(pickle.dumps(
(name, timestamp, size, content_type, etag, deleted),
protocol=2)))
fp.flush()
broker._commit_puts = mock_commit_puts
broker.get_info()
@with_tempdir
def test_create_broker(self, tempdir):
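# create_broker() should place the db at
# <root>/containers/<part>/<hash suffix>/<hash>/<hash>.db, report
# whether it had to initialize it, honour a given put_timestamp, and
# append the epoch to the filename when one is supplied.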
broker, init = ContainerBroker.create_broker(tempdir, 0, 'a', 'c')
hsh = hash_path('a', 'c')
expected_path = os.path.join(
tempdir, 'containers', '0', hsh[-3:], hsh, hsh + '.db')
self.assertEqual(expected_path, broker.db_file)
self.assertTrue(os.path.isfile(expected_path))
self.assertTrue(init)
broker, init = ContainerBroker.create_broker(tempdir, 0, 'a', 'c')
self.assertEqual(expected_path, broker.db_file)
self.assertFalse(init)
ts = Timestamp.now()
broker, init = ContainerBroker.create_broker(tempdir, 0, 'a', 'c1',
put_timestamp=ts.internal)
hsh = hash_path('a', 'c1')
expected_path = os.path.join(
tempdir, 'containers', '0', hsh[-3:], hsh, hsh + '.db')
self.assertEqual(expected_path, broker.db_file)
self.assertTrue(os.path.isfile(expected_path))
self.assertEqual(ts.internal, broker.get_info()['put_timestamp'])
self.assertEqual(0, broker.get_info()['storage_policy_index'])
self.assertTrue(init)
epoch = Timestamp.now()
broker, init = ContainerBroker.create_broker(tempdir, 0, 'a', 'c3',
epoch=epoch)
hsh = hash_path('a', 'c3')
expected_path = os.path.join(
tempdir, 'containers', '0', hsh[-3:],
hsh, '%s_%s.db' % (hsh, epoch.internal))
self.assertEqual(expected_path, broker.db_file)
self.assertTrue(init)
@with_tempdir
def test_pending_file_name(self, tempdir):
# pending file should have same name for sharded or unsharded db
expected_pending_path = os.path.join(tempdir, 'container.db.pending')
db_path = os.path.join(tempdir, 'container.db')
fresh_db_path = os.path.join(tempdir, 'container_epoch.db')
def do_test(given_db_file, expected_db_file):
broker = ContainerBroker(given_db_file, account='a', container='c')
self.assertEqual(expected_pending_path, broker.pending_file)
self.assertEqual(expected_db_file, broker.db_file)
# no files exist
do_test(db_path, db_path)
do_test(fresh_db_path, fresh_db_path)
# only container.db exists - unsharded
with open(db_path, 'wb'):
pass
do_test(db_path, db_path)
do_test(fresh_db_path, db_path)
# container.db and container_epoch.db exist - sharding
with open(fresh_db_path, 'wb'):
pass
do_test(db_path, fresh_db_path)
do_test(fresh_db_path, fresh_db_path)
# only container_epoch.db exists - sharded
os.unlink(db_path)
do_test(db_path, fresh_db_path)
do_test(fresh_db_path, fresh_db_path)
@with_tempdir
def test_sharding_sysmeta(self, tempdir):
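# set_sharding_sysmeta() should store values under the
# X-Container-Sysmeta-Shard-* prefix and get_sharding_sysmeta() should
# return them.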
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(
db_path, account='myaccount', container='mycontainer')
broker.initialize(Timestamp.now().internal)
expected = 'aaa/ccc'
with mock_timestamp_now() as now:
broker.set_sharding_sysmeta('Root', expected)
actual = broker.metadata
self.assertEqual([expected, now.internal],
actual.get('X-Container-Sysmeta-Shard-Root'))
self.assertEqual(expected, broker.get_sharding_sysmeta('Root'))
expected = {'key': 'value'}
with mock_timestamp_now() as now:
broker.set_sharding_sysmeta('test', expected)
actual = broker.metadata
self.assertEqual([expected, now.internal],
actual.get('X-Container-Sysmeta-Shard-test'))
self.assertEqual(expected, broker.get_sharding_sysmeta('test'))
@with_tempdir
def test_path(self, tempdir):
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(
db_path, account='myaccount', container='mycontainer')
broker.initialize(next(self.ts).internal, 1)
# make sure we can cope with uninitialized account and container
broker.account = broker.container = None
self.assertEqual('myaccount/mycontainer', broker.path)
@with_tempdir
def test_old_style_root_account_container_path(self, tempdir):
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(
db_path, account='root_a', container='root_c')
broker.initialize(next(self.ts).internal, 1)
# make sure we can cope with uninitialized account and container
broker.account = broker.container = None
self.assertEqual('root_a', broker.root_account)
self.assertEqual('root_c', broker.root_container)
self.assertEqual('root_a/root_c', broker.root_path)
self.assertTrue(broker.is_root_container())
self.assertEqual('root_a', broker.account) # sanity check
self.assertEqual('root_c', broker.container) # sanity check
# we don't expect root containers to have this sysmeta set, but if it is
# set the broker should still behave like a root container
metadata = {
'X-Container-Sysmeta-Shard-Root':
('root_a/root_c', next(self.ts).internal)}
broker = ContainerBroker(
db_path, account='root_a', container='root_c')
broker.update_metadata(metadata)
broker.account = broker.container = None
self.assertEqual('root_a', broker.root_account)
self.assertEqual('root_c', broker.root_container)
self.assertEqual('root_a/root_c', broker.root_path)
self.assertTrue(broker.is_root_container())
# if root is marked deleted, it still considers itself to be a root
broker.delete_db(next(self.ts).internal)
self.assertEqual('root_a', broker.root_account)
self.assertEqual('root_c', broker.root_container)
self.assertEqual('root_a/root_c', broker.root_path)
self.assertTrue(broker.is_root_container())
# check the values are not just being cached
broker = ContainerBroker(db_path)
self.assertEqual('root_a', broker.root_account)
self.assertEqual('root_c', broker.root_container)
self.assertEqual('root_a/root_c', broker.root_path)
self.assertTrue(broker.is_root_container())
# check a shard container
db_path = os.path.join(tempdir, 'shard_container.db')
broker = ContainerBroker(
db_path, account='.shards_root_a', container='c_shard')
broker.initialize(next(self.ts).internal, 1)
# now the metadata is significant...
metadata = {
'X-Container-Sysmeta-Shard-Root':
('root_a/root_c', next(self.ts).internal)}
broker.update_metadata(metadata)
broker.account = broker.container = None
broker._root_account = broker._root_container = None
self.assertEqual('root_a', broker.root_account)
self.assertEqual('root_c', broker.root_container)
self.assertEqual('root_a/root_c', broker.root_path)
self.assertFalse(broker.is_root_container())
# check validation
def check_validation(root_value):
metadata = {
'X-Container-Sysmeta-Shard-Root':
(root_value, next(self.ts).internal)}
broker.update_metadata(metadata)
broker.account = broker.container = None
broker._root_account = broker._root_container = None
with self.assertRaises(ValueError) as cm:
broker.root_account
self.assertIn('Expected X-Container-Sysmeta-Shard-Root',
str(cm.exception))
with self.assertRaises(ValueError):
broker.root_container
check_validation('root_a')
check_validation('/root_a')
check_validation('/root_a/root_c')
check_validation('/root_a/root_c/blah')
check_validation('/')
@with_tempdir
def test_root_account_container_path(self, tempdir):
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(
db_path, account='root_a', container='root_c')
broker.initialize(next(self.ts).internal, 1)
# make sure we can cope with uninitialized account and container
broker.account = broker.container = None
self.assertEqual('root_a', broker.root_account)
self.assertEqual('root_c', broker.root_container)
self.assertEqual('root_a/root_c', broker.root_path)
self.assertTrue(broker.is_root_container())
self.assertEqual('root_a', broker.account) # sanity check
self.assertEqual('root_c', broker.container) # sanity check
# we don't expect root containers to have this sysmeta set, but if it is
# set the broker should still behave like a root container
metadata = {
'X-Container-Sysmeta-Shard-Quoted-Root':
('root_a/root_c', next(self.ts).internal)}
broker = ContainerBroker(
db_path, account='root_a', container='root_c')
broker.update_metadata(metadata)
broker.account = broker.container = None
self.assertEqual('root_a', broker.root_account)
self.assertEqual('root_c', broker.root_container)
self.assertEqual('root_a/root_c', broker.root_path)
self.assertTrue(broker.is_root_container())
# if root is marked deleted, it still considers itself to be a root
broker.delete_db(next(self.ts).internal)
self.assertEqual('root_a', broker.root_account)
self.assertEqual('root_c', broker.root_container)
self.assertEqual('root_a/root_c', broker.root_path)
self.assertTrue(broker.is_root_container())
# check the values are not just being cached
broker = ContainerBroker(db_path)
self.assertEqual('root_a', broker.root_account)
self.assertEqual('root_c', broker.root_container)
self.assertEqual('root_a/root_c', broker.root_path)
self.assertTrue(broker.is_root_container())
# check a shard container
db_path = os.path.join(tempdir, 'shard_container.db')
broker = ContainerBroker(
db_path, account='.shards_root_a', container='c_shard')
broker.initialize(next(self.ts).internal, 1)
# now the metadata is significant...
metadata = {
'X-Container-Sysmeta-Shard-Quoted-Root':
('root_a/root_c', next(self.ts).internal)}
broker.update_metadata(metadata)
broker.account = broker.container = None
broker._root_account = broker._root_container = None
self.assertEqual('root_a', broker.root_account)
self.assertEqual('root_c', broker.root_container)
self.assertEqual('root_a/root_c', broker.root_path)
self.assertFalse(broker.is_root_container())
# check validation
def check_validation(root_value):
metadata = {
'X-Container-Sysmeta-Shard-Quoted-Root':
(root_value, next(self.ts).internal)}
broker.update_metadata(metadata)
broker.account = broker.container = None
broker._root_account = broker._root_container = None
with self.assertRaises(ValueError) as cm:
broker.root_account
self.assertIn('Expected X-Container-Sysmeta-Shard-Quoted-Root',
str(cm.exception))
with self.assertRaises(ValueError):
broker.root_container
check_validation('root_a')
check_validation('/root_a')
check_validation('/root_a/root_c')
check_validation('/root_a/root_c/blah')
check_validation('/')
def test_resolve_shard_range_states(self):
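# resolve_shard_range_states() should accept state names or numbers,
# expand the 'listing', 'updating' and 'auditing' aliases, and raise
# ValueError for anything else.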
self.assertIsNone(ContainerBroker.resolve_shard_range_states(None))
self.assertIsNone(ContainerBroker.resolve_shard_range_states([]))
for state_num, state_name in ShardRange.STATES.items():
self.assertEqual({state_num},
ContainerBroker.resolve_shard_range_states(
[state_name]))
self.assertEqual({state_num},
ContainerBroker.resolve_shard_range_states(
[state_num]))
self.assertEqual(set(ShardRange.STATES),
ContainerBroker.resolve_shard_range_states(
ShardRange.STATES_BY_NAME))
self.assertEqual(
set(ShardRange.STATES),
ContainerBroker.resolve_shard_range_states(ShardRange.STATES))
# check aliases
self.assertEqual(
{ShardRange.CLEAVED, ShardRange.ACTIVE, ShardRange.SHARDING,
ShardRange.SHRINKING},
ContainerBroker.resolve_shard_range_states(['listing']))
self.assertEqual(
{ShardRange.CLEAVED, ShardRange.ACTIVE, ShardRange.SHARDING,
ShardRange.SHRINKING},
ContainerBroker.resolve_shard_range_states(['listing', 'active']))
self.assertEqual(
{ShardRange.CLEAVED, ShardRange.ACTIVE, ShardRange.SHARDING,
ShardRange.SHRINKING, ShardRange.CREATED},
ContainerBroker.resolve_shard_range_states(['listing', 'created']))
self.assertEqual(
{ShardRange.CREATED, ShardRange.CLEAVED, ShardRange.ACTIVE,
ShardRange.SHARDING},
ContainerBroker.resolve_shard_range_states(['updating']))
self.assertEqual(
{ShardRange.CREATED, ShardRange.CLEAVED, ShardRange.ACTIVE,
ShardRange.SHARDING, ShardRange.SHRINKING},
ContainerBroker.resolve_shard_range_states(
['updating', 'listing']))
self.assertEqual(
{ShardRange.CREATED, ShardRange.CLEAVED,
ShardRange.ACTIVE, ShardRange.SHARDING, ShardRange.SHARDED,
ShardRange.SHRINKING, ShardRange.SHRUNK},
ContainerBroker.resolve_shard_range_states(['auditing']))
def check_bad_value(value):
with self.assertRaises(ValueError) as cm:
ContainerBroker.resolve_shard_range_states(value)
self.assertIn('Invalid state', str(cm.exception))
check_bad_value(['bad_state', 'active'])
check_bad_value([''])
check_bad_value('active')
@with_tempdir
def test_get_shard_ranges(self, tempdir):
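# Exercise the get_shard_ranges() filters: marker/end_marker, includes,
# include_own/exclude_others, include_deleted, states, reverse and
# fill_gaps.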
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
# no rows
self.assertFalse(broker.get_shard_ranges())
# check that a default own shard range is not generated
self.assertFalse(broker.get_shard_ranges(include_own=True))
# merge row for own shard range
own_shard_range = ShardRange(broker.path, next(self.ts), 'l', 'u',
state=ShardRange.SHARDING)
broker.merge_shard_ranges([own_shard_range])
self.assertFalse(broker.get_shard_ranges())
self.assertFalse(broker.get_shard_ranges(include_own=False))
actual = broker.get_shard_ranges(include_own=True)
self.assertEqual([dict(sr) for sr in [own_shard_range]],
[dict(sr) for sr in actual])
# merge rows for other shard ranges
shard_ranges = [
ShardRange('.a/c0', next(self.ts), 'a', 'c'),
ShardRange('.a/c1', next(self.ts), 'c', 'd'),
ShardRange('.a/c2', next(self.ts), 'd', 'f',
state=ShardRange.ACTIVE),
ShardRange('.a/c3', next(self.ts), 'e', 'f', deleted=1,
state=ShardRange.SHARDED,),
ShardRange('.a/c4', next(self.ts), 'f', 'h',
state=ShardRange.CREATED),
ShardRange('.a/c5', next(self.ts), 'h', 'j', deleted=1)
]
broker.merge_shard_ranges(shard_ranges)
actual = broker.get_shard_ranges()
undeleted = shard_ranges[:3] + shard_ranges[4:5]
self.assertEqual([dict(sr) for sr in undeleted],
[dict(sr) for sr in actual])
actual = broker.get_shard_ranges(include_deleted=True)
self.assertEqual([dict(sr) for sr in shard_ranges],
[dict(sr) for sr in actual])
actual = broker.get_shard_ranges(reverse=True)
self.assertEqual([dict(sr) for sr in reversed(undeleted)],
[dict(sr) for sr in actual])
actual = broker.get_shard_ranges(marker='c', end_marker='e')
self.assertEqual([dict(sr) for sr in shard_ranges[1:3]],
[dict(sr) for sr in actual])
actual = broker.get_shard_ranges(marker='c', end_marker='e',
states=ShardRange.ACTIVE)
self.assertEqual([dict(sr) for sr in shard_ranges[2:3]],
[dict(sr) for sr in actual])
actual = broker.get_shard_ranges(marker='e', end_marker='e')
self.assertFalse([dict(sr) for sr in actual])
# includes overrides include_own
actual = broker.get_shard_ranges(includes='b', include_own=True)
self.assertEqual([dict(shard_ranges[0])], [dict(sr) for sr in actual])
# ... unless they coincide
actual = broker.get_shard_ranges(includes='t', include_own=True)
self.assertEqual([dict(own_shard_range)], [dict(sr) for sr in actual])
# exclude_others overrides includes
actual = broker.get_shard_ranges(includes='b', exclude_others=True)
self.assertFalse(actual)
# include_deleted overrides includes
actual = broker.get_shard_ranges(includes='i', include_deleted=True)
self.assertEqual([dict(shard_ranges[-1])], [dict(sr) for sr in actual])
actual = broker.get_shard_ranges(includes='i', include_deleted=False)
self.assertFalse(actual)
# includes overrides marker/end_marker
actual = broker.get_shard_ranges(includes='b', marker='e',
end_marker='')
self.assertEqual([dict(shard_ranges[0])], [dict(sr) for sr in actual])
actual = broker.get_shard_ranges(includes='b', marker=Namespace.MAX)
self.assertEqual([dict(shard_ranges[0])], [dict(sr) for sr in actual])
# end_marker is Namespace.MAX
actual = broker.get_shard_ranges(marker='e', end_marker='')
self.assertEqual([dict(sr) for sr in undeleted[2:]],
[dict(sr) for sr in actual])
actual = broker.get_shard_ranges(marker='e', end_marker='',
reverse=True)
self.assertEqual([dict(sr) for sr in reversed(undeleted[:3])],
[dict(sr) for sr in actual])
# marker is Namespace.MIN
actual = broker.get_shard_ranges(marker='', end_marker='d')
self.assertEqual([dict(sr) for sr in shard_ranges[:2]],
[dict(sr) for sr in actual])
actual = broker.get_shard_ranges(marker='', end_marker='d',
reverse=True, include_deleted=True)
self.assertEqual([dict(sr) for sr in reversed(shard_ranges[2:])],
[dict(sr) for sr in actual])
# marker, end_marker span entire namespace
actual = broker.get_shard_ranges(marker='', end_marker='')
self.assertEqual([dict(sr) for sr in undeleted],
[dict(sr) for sr in actual])
# marker, end_marker override include_own
actual = broker.get_shard_ranges(marker='', end_marker='k',
include_own=True)
self.assertEqual([dict(sr) for sr in undeleted],
[dict(sr) for sr in actual])
actual = broker.get_shard_ranges(marker='u', end_marker='',
include_own=True)
self.assertFalse(actual)
# ...unless they coincide
actual = broker.get_shard_ranges(marker='t', end_marker='',
include_own=True)
self.assertEqual([dict(own_shard_range)], [dict(sr) for sr in actual])
# null namespace cases
actual = broker.get_shard_ranges(end_marker=Namespace.MIN)
self.assertFalse(actual)
actual = broker.get_shard_ranges(marker=Namespace.MAX)
self.assertFalse(actual)
orig_execute = GreenDBConnection.execute
mock_call_args = []
def mock_execute(*args, **kwargs):
mock_call_args.append(args)
return orig_execute(*args, **kwargs)
with mock.patch('swift.common.db.GreenDBConnection.execute',
mock_execute):
actual = broker.get_shard_ranges(includes='f')
self.assertEqual([dict(sr) for sr in shard_ranges[2:3]],
[dict(sr) for sr in actual])
self.assertEqual(1, len(mock_call_args))
# verify that includes keyword plumbs through to an SQL condition
self.assertIn("WHERE deleted=0 AND name != ? AND lower < ? AND "
"(upper = '' OR upper >= ?)", mock_call_args[0][1])
self.assertEqual(['a/c', 'f', 'f'], mock_call_args[0][2])
mock_call_args = []
with mock.patch('swift.common.db.GreenDBConnection.execute',
mock_execute):
actual = broker.get_shard_ranges(marker='c', end_marker='d')
self.assertEqual([dict(sr) for sr in shard_ranges[1:2]],
[dict(sr) for sr in actual])
self.assertEqual(1, len(mock_call_args))
# verify that marker & end_marker plumb through to an SQL condition
self.assertIn("WHERE deleted=0 AND name != ? AND lower < ? AND "
"(upper = '' OR upper > ?)", mock_call_args[0][1])
self.assertEqual(['a/c', 'd', 'c'], mock_call_args[0][2])
actual = broker.get_shard_ranges(includes='i')
self.assertFalse(actual)
actual = broker.get_shard_ranges(
states=[ShardRange.CREATED, ShardRange.ACTIVE])
self.assertEqual(
[dict(sr) for sr in [shard_ranges[2], shard_ranges[4]]],
[dict(sr) for sr in actual])
# fill gaps
filler = own_shard_range.copy()
filler.lower = 'h'
actual = broker.get_shard_ranges(fill_gaps=True)
self.assertEqual([dict(sr) for sr in undeleted + [filler]],
[dict(sr) for sr in actual])
actual = broker.get_shard_ranges(fill_gaps=True, marker='a')
self.assertEqual([dict(sr) for sr in undeleted + [filler]],
[dict(sr) for sr in actual])
actual = broker.get_shard_ranges(fill_gaps=True, end_marker='z')
self.assertEqual([dict(sr) for sr in undeleted + [filler]],
[dict(sr) for sr in actual])
actual = broker.get_shard_ranges(fill_gaps=True, end_marker='k')
filler.upper = 'k'
self.assertEqual([dict(sr) for sr in undeleted + [filler]],
[dict(sr) for sr in actual])
# includes overrides fill_gaps
actual = broker.get_shard_ranges(includes='b', fill_gaps=True)
self.assertEqual([dict(shard_ranges[0])], [dict(sr) for sr in actual])
# no filler needed...
actual = broker.get_shard_ranges(fill_gaps=True, end_marker='h')
self.assertEqual([dict(sr) for sr in undeleted],
[dict(sr) for sr in actual])
actual = broker.get_shard_ranges(fill_gaps=True, end_marker='a')
self.assertEqual([], [dict(sr) for sr in actual])
# get everything
actual = broker.get_shard_ranges(include_own=True)
self.assertEqual([dict(sr) for sr in undeleted + [own_shard_range]],
[dict(sr) for sr in actual])
# get just own range
actual = broker.get_shard_ranges(include_own=True, exclude_others=True)
self.assertEqual([dict(sr) for sr in [own_shard_range]],
[dict(sr) for sr in actual])
# if you ask for nothing you'll get nothing
actual = broker.get_shard_ranges(
include_own=False, exclude_others=True)
self.assertFalse(actual)
@with_tempdir
def test_get_shard_ranges_includes(self, tempdir):
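# includes=<name> should return at most one undeleted shard range whose
# namespace contains the name; a gap (here, after l-r is deleted)
# yields an empty result.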
ts = next(self.ts)
start = ShardRange('a/-a', ts, '', 'a')
atof = ShardRange('a/a-f', ts, 'a', 'f')
ftol = ShardRange('a/f-l', ts, 'f', 'l')
ltor = ShardRange('a/l-r', ts, 'l', 'r')
rtoz = ShardRange('a/r-z', ts, 'r', 'z')
end = ShardRange('a/z-', ts, 'z', '')
ranges = [start, atof, ftol, ltor, rtoz, end]
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
broker.merge_shard_ranges(ranges)
actual = broker.get_shard_ranges(includes='')
self.assertEqual(actual, [])
actual = broker.get_shard_ranges(includes=' ')
self.assertEqual(actual, [start])
actual = broker.get_shard_ranges(includes='b')
self.assertEqual(actual, [atof])
actual = broker.get_shard_ranges(includes='f')
self.assertEqual(actual, [atof])
actual = broker.get_shard_ranges(includes='f\x00')
self.assertEqual(actual, [ftol])
actual = broker.get_shard_ranges(includes='x')
self.assertEqual(actual, [rtoz])
actual = broker.get_shard_ranges(includes='r')
self.assertEqual(actual, [ltor])
actual = broker.get_shard_ranges(includes='}')
self.assertEqual(actual, [end])
# add some overlapping sub-shards
ftoh = ShardRange('a/f-h', ts, 'f', 'h')
htok = ShardRange('a/h-k', ts, 'h', 'k')
broker.merge_shard_ranges([ftoh, htok])
actual = broker.get_shard_ranges(includes='g')
self.assertEqual(actual, [ftoh])
actual = broker.get_shard_ranges(includes='h')
self.assertEqual(actual, [ftoh])
actual = broker.get_shard_ranges(includes='k')
self.assertEqual(actual, [htok])
actual = broker.get_shard_ranges(includes='l')
self.assertEqual(actual, [ftol])
actual = broker.get_shard_ranges(includes='m')
self.assertEqual(actual, [ltor])
# remove l-r from shard ranges and try and find a shard range for an
# item in that range.
ltor.set_deleted(next(self.ts))
broker.merge_shard_ranges([ltor])
actual = broker.get_shard_ranges(includes='p')
self.assertEqual(actual, [])
@with_tempdir
def test_overlap_shard_range_order(self, tempdir):
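# Overlapping shard ranges merged in random order should come back from
# get_shard_ranges() in a deterministic order.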
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
epoch0 = next(self.ts)
epoch1 = next(self.ts)
shard_ranges = [
ShardRange('.shard_a/shard_%d-%d' % (e, s), epoch, l, u,
state=ShardRange.ACTIVE)
for s, (l, u) in enumerate(zip(string.ascii_letters[:7],
string.ascii_letters[1:]))
for e, epoch in enumerate((epoch0, epoch1))
]
random.shuffle(shard_ranges)
for sr in shard_ranges:
broker.merge_shard_ranges([sr])
expected = [
'.shard_a/shard_0-0',
'.shard_a/shard_1-0',
'.shard_a/shard_0-1',
'.shard_a/shard_1-1',
'.shard_a/shard_0-2',
'.shard_a/shard_1-2',
'.shard_a/shard_0-3',
'.shard_a/shard_1-3',
'.shard_a/shard_0-4',
'.shard_a/shard_1-4',
'.shard_a/shard_0-5',
'.shard_a/shard_1-5',
'.shard_a/shard_0-6',
'.shard_a/shard_1-6',
]
self.assertEqual(expected, [
sr.name for sr in broker.get_shard_ranges()])
@with_tempdir
def test_get_shard_ranges_with_sharding_overlaps(self, tempdir):
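# With a SHARDING parent overlapping its CLEAVED/CREATED sub-ranges,
# listing states should exclude the CREATED sub-range, and update-state
# lookups with includes= should resolve to the sub-ranges.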
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
shard_ranges = [
ShardRange('.shards_a/c0', next(self.ts), 'a', 'd',
state=ShardRange.ACTIVE),
ShardRange('.shards_a/c1_0', next(self.ts), 'd', 'g',
state=ShardRange.CLEAVED),
ShardRange('.shards_a/c1_1', next(self.ts), 'g', 'j',
state=ShardRange.CLEAVED),
ShardRange('.shards_a/c1_2', next(self.ts), 'j', 'm',
state=ShardRange.CREATED),
ShardRange('.shards_a/c1', next(self.ts), 'd', 'm',
state=ShardRange.SHARDING),
ShardRange('.shards_a/c2', next(self.ts), 'm', '',
state=ShardRange.ACTIVE),
]
broker.merge_shard_ranges(
random.sample(shard_ranges, len(shard_ranges)))
actual = broker.get_shard_ranges()
self.assertEqual([dict(sr) for sr in shard_ranges],
[dict(sr) for sr in actual])
actual = broker.get_shard_ranges(states=SHARD_LISTING_STATES)
self.assertEqual(
[dict(sr) for sr in shard_ranges[:3] + shard_ranges[4:]],
[dict(sr) for sr in actual])
orig_execute = GreenDBConnection.execute
mock_call_args = []
def mock_execute(*args, **kwargs):
mock_call_args.append(args)
return orig_execute(*args, **kwargs)
with mock.patch('swift.common.db.GreenDBConnection.execute',
mock_execute):
actual = broker.get_shard_ranges(states=SHARD_UPDATE_STATES,
includes='e')
self.assertEqual([dict(shard_ranges[1])],
[dict(sr) for sr in actual])
self.assertEqual(1, len(mock_call_args))
# verify that includes keyword plumbs through to an SQL condition
self.assertIn("WHERE deleted=0 AND state in (?,?,?,?) AND name != ? "
"AND lower < ? AND (upper = '' OR upper >= ?)",
mock_call_args[0][1])
actual = broker.get_shard_ranges(states=SHARD_UPDATE_STATES,
includes='j')
self.assertEqual([shard_ranges[2]], actual)
actual = broker.get_shard_ranges(states=SHARD_UPDATE_STATES,
includes='k')
self.assertEqual([shard_ranges[3]], actual)
@with_tempdir
def test_get_shard_ranges_with_shrinking_overlaps(self, tempdir):
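# An update for a name in a SHRINKING range's namespace should be
# directed to the overlapping ACTIVE acceptor range.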
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
shard_ranges = [
ShardRange('.shards_a/c0', next(self.ts), 'a', 'k',
state=ShardRange.ACTIVE),
ShardRange('.shards_a/c1', next(self.ts), 'k', 'm',
state=ShardRange.SHRINKING),
ShardRange('.shards_a/c2', next(self.ts), 'k', 't',
state=ShardRange.ACTIVE),
ShardRange('.shards_a/c3', next(self.ts), 't', '',
state=ShardRange.ACTIVE),
]
broker.merge_shard_ranges(
random.sample(shard_ranges, len(shard_ranges)))
actual = broker.get_shard_ranges()
self.assertEqual([dict(sr) for sr in shard_ranges],
[dict(sr) for sr in actual])
actual = broker.get_shard_ranges(states=SHARD_UPDATE_STATES,
includes='l')
self.assertEqual([shard_ranges[2]], actual)
@with_tempdir
def test_get_shard_range_rows_with_limit(self, tempdir):
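# _get_shard_range_rows() should honour the limit kwarg: zero returns
# nothing, a negative value means unlimited, and include_own with
# exclude_others restricts results to the broker's own row.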
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
shard_ranges = [
ShardRange('a/c', next(self.ts), 'a', 'c'),
ShardRange('.a/c1', next(self.ts), 'c', 'd'),
ShardRange('.a/c2', next(self.ts), 'd', 'f'),
ShardRange('.a/c3', next(self.ts), 'd', 'f', deleted=1),
]
broker.merge_shard_ranges(shard_ranges)
actual = broker._get_shard_range_rows(include_deleted=True,
include_own=True)
self.assertEqual(4, len(actual))
# the order of rows is not predictable, but they should be unique
self.assertEqual(4, len(set(actual)))
actual = broker._get_shard_range_rows(include_deleted=True)
self.assertEqual(3, len(actual))
self.assertEqual(3, len(set(actual)))
# negative -> unlimited
actual = broker._get_shard_range_rows(include_deleted=True, limit=-1)
self.assertEqual(3, len(actual))
self.assertEqual(3, len(set(actual)))
# zero is applied
actual = broker._get_shard_range_rows(include_deleted=True, limit=0)
self.assertFalse(actual)
actual = broker._get_shard_range_rows(include_deleted=True, limit=1)
self.assertEqual(1, len(actual))
self.assertEqual(1, len(set(actual)))
actual = broker._get_shard_range_rows(include_deleted=True, limit=2)
self.assertEqual(2, len(actual))
self.assertEqual(2, len(set(actual)))
actual = broker._get_shard_range_rows(include_deleted=True, limit=3)
self.assertEqual(3, len(actual))
self.assertEqual(3, len(set(actual)))
actual = broker._get_shard_range_rows(include_deleted=True, limit=4)
self.assertEqual(3, len(actual))
self.assertEqual(3, len(set(actual)))
actual = broker._get_shard_range_rows(include_deleted=True,
include_own=True,
exclude_others=True,
limit=1)
self.assertEqual(1, len(actual))
self.assertEqual(shard_ranges[0], ShardRange(*actual[0]))
actual = broker._get_shard_range_rows(include_deleted=True,
include_own=True,
exclude_others=True,
limit=4)
self.assertEqual(1, len(actual))
self.assertEqual(shard_ranges[0], ShardRange(*actual[0]))
@with_tempdir
def test_get_own_shard_range(self, tempdir):
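# With no row in the table, get_own_shard_range() synthesizes a default
# range spanning the whole namespace (unless no_default=True); once a
# row exists it is returned as-is, even after being marked deleted.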
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(
db_path, account='.shards_a', container='shard_c')
broker.initialize(next(self.ts).internal, 0)
# no row for own shard range - expect a default own shard range
# covering the entire namespace
now = Timestamp.now()
own_sr = ShardRange(broker.path, now, '', '', 0, 0, now,
state=ShardRange.ACTIVE)
with mock.patch('swift.container.backend.Timestamp.now',
return_value=now):
actual = broker.get_own_shard_range()
self.assertEqual(dict(own_sr), dict(actual))
actual = broker.get_own_shard_range(no_default=True)
self.assertIsNone(actual)
# row for own shard range and others
ts_1 = next(self.ts)
own_sr = ShardRange(broker.path, ts_1, 'l', 'u')
broker.merge_shard_ranges(
[own_sr,
ShardRange('.a/c1', next(self.ts), 'b', 'c'),
ShardRange('.a/c2', next(self.ts), 'c', 'd')])
actual = broker.get_own_shard_range()
self.assertEqual(dict(own_sr), dict(actual))
# check stats are not automatically updated
broker.put_object(
'o1', next(self.ts).internal, 100, 'text/plain', 'etag1')
broker.put_object(
'o2', next(self.ts).internal, 99, 'text/plain', 'etag2')
actual = broker.get_own_shard_range()
self.assertEqual(dict(own_sr), dict(actual))
# check non-zero stats returned
own_sr.update_meta(object_count=2, bytes_used=199,
meta_timestamp=next(self.ts))
broker.merge_shard_ranges(own_sr)
actual = broker.get_own_shard_range()
self.assertEqual(dict(own_sr), dict(actual))
# still returned when deleted
own_sr.update_meta(object_count=0, bytes_used=0,
meta_timestamp=next(self.ts))
delete_ts = next(self.ts)
own_sr.set_deleted(timestamp=delete_ts)
broker.merge_shard_ranges(own_sr)
actual = broker.get_own_shard_range()
self.assertEqual(dict(own_sr), dict(actual))
# still in table after reclaim_age
broker.reclaim(next(self.ts).internal, next(self.ts).internal)
actual = broker.get_own_shard_range()
self.assertEqual(dict(own_sr), dict(actual))
# entire namespace
ts_2 = next(self.ts)
own_sr = ShardRange(broker.path, ts_2, '', '')
broker.merge_shard_ranges([own_sr])
actual = broker.get_own_shard_range()
self.assertEqual(dict(own_sr), dict(actual))
orig_execute = GreenDBConnection.execute
mock_call_args = []
def mock_execute(*args, **kwargs):
mock_call_args.append(args)
return orig_execute(*args, **kwargs)
with mock.patch('swift.common.db.GreenDBConnection.execute',
mock_execute):
actual = broker.get_own_shard_range()
self.assertEqual(dict(own_sr), dict(actual))
self.assertEqual(1, len(mock_call_args))
# verify that SQL is optimised with LIMIT
self.assertIn("WHERE name = ? LIMIT 1", mock_call_args[0][1])
self.assertEqual(['.shards_a/shard_c'], mock_call_args[0][2])
@with_tempdir
def test_enable_sharding(self, tempdir):
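# enable_sharding() should stamp the own shard range with the given
# epoch and move it to the SHARDING state.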
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(
db_path, account='.shards_a', container='shard_c')
broker.initialize(next(self.ts).internal, 0)
epoch = next(self.ts)
broker.enable_sharding(epoch)
own_sr = broker.get_own_shard_range(no_default=True)
self.assertEqual(epoch, own_sr.epoch)
self.assertEqual(epoch, own_sr.state_timestamp)
self.assertEqual(ShardRange.SHARDING, own_sr.state)
@with_tempdir
def test_get_shard_usage(self, tempdir):
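# get_shard_usage() on a root container should sum object_count and
# bytes_used over shard ranges in ACTIVE, SHARDING and SHRINKING states.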
shard_range_by_state = dict(
(state, ShardRange('.shards_a/c_%s' % state, next(self.ts),
str(state), str(state + 1),
2 * state, 2 * state + 1, 2,
state=state))
for state in ShardRange.STATES)
def make_broker(a, c):
db_path = os.path.join(tempdir, '%s.db' % uuid4())
broker = ContainerBroker(db_path, account=a, container=c)
broker.initialize(next(self.ts).internal, 0)
broker.set_sharding_sysmeta('Root', 'a/c')
broker.merge_shard_ranges(list(shard_range_by_state.values()))
return broker
# make broker appear to be a root container
broker = make_broker('a', 'c')
self.assertTrue(broker.is_root_container())
included_states = (ShardRange.ACTIVE, ShardRange.SHARDING,
ShardRange.SHRINKING)
included = [shard_range_by_state[state] for state in included_states]
expected = {
'object_count': sum([sr.object_count for sr in included]),
'bytes_used': sum([sr.bytes_used for sr in included])
}
self.assertEqual(expected, broker.get_shard_usage())
@with_tempdir
def _check_find_shard_ranges(self, c_lower, c_upper, tempdir):
ts_now = Timestamp.now()
container_name = 'test_container'
def do_test(expected_bounds, expected_last_found, shard_size, limit,
start_index=0, existing=None, minimum_size=1):
# expected_bounds is a list of tuples (lower, upper, object_count)
# build expected shard ranges
expected_shard_ranges = [
dict(lower=lower, upper=upper, index=index,
object_count=object_count)
for index, (lower, upper, object_count)
in enumerate(expected_bounds, start_index)]
with mock.patch('swift.common.utils.time.time',
return_value=float(ts_now.normal)):
ranges, last_found = broker.find_shard_ranges(
shard_size, limit=limit, existing_ranges=existing,
minimum_shard_size=minimum_size)
self.assertEqual(expected_shard_ranges, ranges)
self.assertEqual(expected_last_found, last_found)
db_path = os.path.join(tempdir, 'test_container.db')
broker = ContainerBroker(
db_path, account='a', container=container_name)
# shard size > object count, no objects
broker.initialize(next(self.ts).internal, 0)
ts = next(self.ts)
if c_lower or c_upper:
# testing a shard, so set its own shard range
own_shard_range = ShardRange(broker.path, ts, c_lower, c_upper)
broker.merge_shard_ranges([own_shard_range])
self.assertEqual(([], False), broker.find_shard_ranges(10))
for i in range(10):
broker.put_object(
'obj%02d' % i, next(self.ts).internal, 0, 'text/plain', 'etag')
expected_bounds = [(c_lower, 'obj04', 5), ('obj04', c_upper, 5)]
do_test(expected_bounds, True, shard_size=5, limit=None)
expected = [(c_lower, 'obj06', 7), ('obj06', c_upper, 3)]
do_test(expected, True, shard_size=7, limit=None)
expected = [(c_lower, 'obj08', 9), ('obj08', c_upper, 1)]
do_test(expected, True, shard_size=9, limit=None)
# shard size >= object count
do_test([], False, shard_size=10, limit=None)
do_test([], False, shard_size=11, limit=None)
# check use of limit
do_test([], False, shard_size=4, limit=0)
expected = [(c_lower, 'obj03', 4)]
do_test(expected, False, shard_size=4, limit=1)
expected = [(c_lower, 'obj03', 4), ('obj03', 'obj07', 4)]
do_test(expected, False, shard_size=4, limit=2)
expected = [(c_lower, 'obj03', 4), ('obj03', 'obj07', 4),
('obj07', c_upper, 2)]
do_test(expected, True, shard_size=4, limit=3)
do_test(expected, True, shard_size=4, limit=4)
do_test(expected, True, shard_size=4, limit=-1)
# check use of minimum_shard_size
expected = [(c_lower, 'obj03', 4), ('obj03', 'obj07', 4),
('obj07', c_upper, 2)]
do_test(expected, True, shard_size=4, limit=None, minimum_size=2)
        # out-of-range minimum_size values are ignored
do_test(expected, True, shard_size=4, limit=None, minimum_size=0)
do_test(expected, True, shard_size=4, limit=None, minimum_size=-1)
# minimum_size > potential final shard
expected = [(c_lower, 'obj03', 4), ('obj03', c_upper, 6)]
do_test(expected, True, shard_size=4, limit=None, minimum_size=3)
# extended shard size >= object_count
do_test([], False, shard_size=6, limit=None, minimum_size=5)
do_test([], False, shard_size=6, limit=None, minimum_size=500)
# increase object count to 11
broker.put_object(
'obj10', next(self.ts).internal, 0, 'text/plain', 'etag')
expected = [(c_lower, 'obj03', 4), ('obj03', 'obj07', 4),
('obj07', c_upper, 3)]
do_test(expected, True, shard_size=4, limit=None)
expected = [(c_lower, 'obj09', 10), ('obj09', c_upper, 1)]
do_test(expected, True, shard_size=10, limit=None)
do_test([], False, shard_size=11, limit=None)
# now pass in a pre-existing shard range
existing = [ShardRange(
'.shards_a/srange-0', Timestamp.now(), '', 'obj03',
object_count=4, state=ShardRange.FOUND)]
expected = [('obj03', 'obj07', 4), ('obj07', c_upper, 3)]
do_test(expected, True, shard_size=4, limit=None, start_index=1,
existing=existing)
expected = [('obj03', 'obj07', 4)]
do_test(expected, False, shard_size=4, limit=1, start_index=1,
existing=existing)
# using increased shard size should not distort estimation of progress
expected = [('obj03', 'obj09', 6), ('obj09', c_upper, 1)]
do_test(expected, True, shard_size=6, limit=None, start_index=1,
existing=existing)
# add another existing...
existing.append(ShardRange(
'.shards_a/srange-1', Timestamp.now(), '', 'obj07',
object_count=4, state=ShardRange.FOUND))
expected = [('obj07', c_upper, 3)]
do_test(expected, True, shard_size=10, limit=None, start_index=2,
existing=existing)
# an existing shard range not in FOUND state should not distort
# estimation of progress, but may cause final range object count to
# default to shard_size
existing[-1].state = ShardRange.CREATED
existing[-1].object_count = 10
# there's only 3 objects left to scan but progress cannot be reliably
# calculated, so final shard range has object count of 2
expected = [('obj07', 'obj09', 2), ('obj09', c_upper, 2)]
do_test(expected, True, shard_size=2, limit=None, start_index=2,
existing=existing)
# add last shard range so there's none left to find
existing.append(ShardRange(
'.shards_a/srange-2', Timestamp.now(), 'obj07', c_upper,
object_count=4, state=ShardRange.FOUND))
do_test([], True, shard_size=4, limit=None, existing=existing)
def test_find_shard_ranges(self):
self._check_find_shard_ranges('', '')
self._check_find_shard_ranges('', 'upper')
self._check_find_shard_ranges('lower', '')
self._check_find_shard_ranges('lower', 'upper')
@with_tempdir
def test_find_shard_ranges_with_misplaced_objects(self, tempdir):
# verify that misplaced objects outside of a shard's range do not
# influence choice of shard ranges (but do distort the object counts)
ts_now = Timestamp.now()
container_name = 'test_container'
db_path = os.path.join(tempdir, 'test_container.db')
broker = ContainerBroker(
db_path, account='a', container=container_name)
# shard size > object count, no objects
broker.initialize(next(self.ts).internal, 0)
ts = next(self.ts)
own_shard_range = ShardRange(broker.path, ts, 'l', 'u')
broker.merge_shard_ranges([own_shard_range])
self.assertEqual(([], False), broker.find_shard_ranges(10))
for name in ('a-misplaced', 'm', 'n', 'p', 'q', 'r', 'z-misplaced'):
broker.put_object(
name, next(self.ts).internal, 0, 'text/plain', 'etag')
expected_bounds = (
('l', 'n', 2), # contains m, n
('n', 'q', 2), # contains p, q
('q', 'u', 3) # contains r; object count distorted by 2 misplaced
)
expected_shard_ranges = [
dict(lower=lower, upper=upper, index=index,
object_count=object_count)
for index, (lower, upper, object_count)
in enumerate(expected_bounds)]
with mock.patch('swift.common.utils.time.time',
return_value=float(ts_now.normal)):
actual_shard_ranges, last_found = broker.find_shard_ranges(2, -1)
self.assertEqual(expected_shard_ranges, actual_shard_ranges)
@with_tempdir
def test_find_shard_ranges_errors(self, tempdir):
db_path = os.path.join(tempdir, 'test_container.db')
broker = ContainerBroker(db_path, account='a', container='c',
logger=debug_logger())
broker.initialize(next(self.ts).internal, 0)
for i in range(2):
broker.put_object(
'obj%d' % i, next(self.ts).internal, 0, 'text/plain', 'etag')
klass = 'swift.container.backend.ContainerBroker'
with mock.patch(klass + '._get_next_shard_range_upper',
side_effect=LockTimeout()):
ranges, last_found = broker.find_shard_ranges(1)
self.assertFalse(ranges)
self.assertFalse(last_found)
lines = broker.logger.get_lines_for_level('error')
self.assertIn('Problem finding shard upper', lines[0])
self.assertFalse(lines[1:])
broker.logger.clear()
with mock.patch(klass + '._get_next_shard_range_upper',
side_effect=sqlite3.OperationalError()):
ranges, last_found = broker.find_shard_ranges(1)
self.assertFalse(last_found)
self.assertFalse(ranges)
lines = broker.logger.get_lines_for_level('error')
self.assertIn('Problem finding shard upper', lines[0])
self.assertFalse(lines[1:])
@with_tempdir
def test_set_db_states(self, tempdir):
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
# load up the broker with some objects
objects = [{'name': 'obj_%d' % i,
'created_at': next(self.ts).normal,
'content_type': 'text/plain',
'etag': 'etag_%d' % i,
'size': 1024 * i,
'deleted': 0,
'storage_policy_index': 0,
} for i in range(1, 6)]
# merge_items mutates items
broker.merge_items([dict(obj) for obj in objects])
original_info = broker.get_info()
# Add some metadata
meta = {
'X-Container-Meta-Color': ['Blue', next(self.ts).normal],
'X-Container-Meta-Cleared': ['', next(self.ts).normal],
'X-Container-Sysmeta-Shape': ['Circle', next(self.ts).normal],
}
broker.update_metadata(meta)
# Add some syncs
incoming_sync = {'remote_id': 'incoming_123', 'sync_point': 1}
outgoing_sync = {'remote_id': 'outgoing_123', 'sync_point': 2}
broker.merge_syncs([outgoing_sync], incoming=False)
broker.merge_syncs([incoming_sync], incoming=True)
# Add some ShardRanges
shard_ranges = [ShardRange(
name='.shards_a/shard_range_%s' % i,
timestamp=next(self.ts), lower='obj_%d' % i,
upper='obj_%d' % (i + 2),
object_count=len(objects[i:i + 2]),
bytes_used=sum(obj['size'] for obj in objects[i:i + 2]),
meta_timestamp=next(self.ts)) for i in range(0, 6, 2)]
deleted_range = ShardRange('.shards_a/shard_range_z', next(self.ts),
'z', '', state=ShardRange.SHARDED,
deleted=1)
own_sr = ShardRange(name='a/c', timestamp=next(self.ts),
state=ShardRange.ACTIVE)
broker.merge_shard_ranges([own_sr] + shard_ranges + [deleted_range])
ts_epoch = next(self.ts)
new_db_path = os.path.join(tempdir, 'containers', 'part', 'suffix',
'hash', 'container_%s.db' % ts_epoch.normal)
def check_broker_properties(broker):
# these broker properties should remain unchanged as state changes
self.assertEqual(broker.get_max_row(), 5)
all_metadata = broker.metadata
original_meta = dict((k, all_metadata[k]) for k in meta)
self.assertEqual(original_meta, meta)
self.assertEqual(broker.get_syncs(True)[0], incoming_sync)
self.assertEqual(broker.get_syncs(False)[0], outgoing_sync)
self.assertEqual(shard_ranges + [own_sr, deleted_range],
broker.get_shard_ranges(include_own=True,
include_deleted=True))
def check_broker_info(actual_info):
for key in ('db_state', 'id', 'hash'):
actual_info.pop(key, None)
original_info.pop(key, None)
self.assertEqual(original_info, actual_info)
def check_unsharded_state(broker):
# these are expected properties in unsharded state
self.assertEqual(len(broker.get_brokers()), 1)
self.assertEqual(broker.get_db_state(), UNSHARDED)
self.assertTrue(os.path.exists(db_path))
self.assertFalse(os.path.exists(new_db_path))
self.assertEqual(objects, broker.get_objects())
# Sanity checks
check_broker_properties(broker)
check_unsharded_state(broker)
check_broker_info(broker.get_info())
# first test that moving from UNSHARDED to SHARDED doesn't work
self.assertFalse(broker.set_sharded_state())
# check nothing changed
check_broker_properties(broker)
check_broker_info(broker.get_info())
check_unsharded_state(broker)
# cannot go to SHARDING without an epoch set
self.assertFalse(broker.set_sharding_state())
# now set sharding epoch and make sure everything moves.
broker.enable_sharding(ts_epoch)
self.assertTrue(broker.set_sharding_state())
check_broker_properties(broker)
check_broker_info(broker.get_info())
def check_sharding_state(broker):
self.assertEqual(len(broker.get_brokers()), 2)
self.assertEqual(broker.get_db_state(), SHARDING)
self.assertTrue(os.path.exists(db_path))
self.assertTrue(os.path.exists(new_db_path))
self.assertEqual([], broker.get_objects())
self.assertEqual(objects, broker.get_brokers()[0].get_objects())
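            # sync points are not carried over to the fresh db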
self.assertEqual(broker.get_reconciler_sync(), -1)
info = broker.get_info()
if info.get('x_container_sync_point1'):
self.assertEqual(info['x_container_sync_point1'], -1)
self.assertEqual(info['x_container_sync_point2'], -1)
check_sharding_state(broker)
        # to confirm we're definitely looking at the fresh db
broker2 = ContainerBroker(new_db_path)
check_broker_properties(broker2)
check_broker_info(broker2.get_info())
self.assertEqual([], broker2.get_objects())
# Try to set sharding state again
self.assertFalse(broker.set_sharding_state())
# check nothing changed
check_broker_properties(broker)
check_broker_info(broker.get_info())
check_sharding_state(broker)
# Now move to the final state - update shard ranges' state
broker.merge_shard_ranges(
[dict(sr, state=ShardRange.ACTIVE,
state_timestamp=next(self.ts).internal)
for sr in shard_ranges])
# pretend all ranges have been cleaved
self.assertTrue(broker.set_sharded_state())
check_broker_properties(broker)
check_broker_info(broker.get_info())
def check_sharded_state(broker):
self.assertEqual(broker.get_db_state(), SHARDED)
self.assertEqual(len(broker.get_brokers()), 1)
self.assertFalse(os.path.exists(db_path))
self.assertTrue(os.path.exists(new_db_path))
self.assertEqual([], broker.get_objects())
check_sharded_state(broker)
# Try to set sharded state again
self.assertFalse(broker.set_sharded_state())
# check nothing changed
check_broker_properties(broker)
check_broker_info(broker.get_info())
check_sharded_state(broker)
# delete the container
broker.delete_db(next(self.ts).internal)
# but it is not considered deleted while shards have content
self.assertFalse(broker.is_deleted())
check_sharded_state(broker)
# empty the shard ranges
empty_shard_ranges = [sr.copy(object_count=0, bytes_used=0,
meta_timestamp=next(self.ts))
for sr in shard_ranges]
broker.merge_shard_ranges(empty_shard_ranges)
# and now it is deleted
self.assertTrue(broker.is_deleted())
check_sharded_state(broker)
def do_revive_shard_delete(shard_ranges):
# delete all shard ranges
deleted_shard_ranges = [sr.copy(timestamp=next(self.ts), deleted=1)
for sr in shard_ranges]
broker.merge_shard_ranges(deleted_shard_ranges)
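            # with no undeleted shard ranges left, the previously sharded db
            # reports a collapsed state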
self.assertEqual(COLLAPSED, broker.get_db_state())
# add new shard ranges and go to sharding state - need to force
# broker time to be after the delete time in order to write new
# sysmeta
broker.enable_sharding(next(self.ts))
shard_ranges = [sr.copy(timestamp=next(self.ts))
for sr in shard_ranges]
broker.merge_shard_ranges(shard_ranges)
with mock.patch('swift.common.db.time.time',
lambda: float(next(self.ts))):
self.assertTrue(broker.set_sharding_state())
self.assertEqual(SHARDING, broker.get_db_state())
# go to sharded
self.assertTrue(
broker.set_sharded_state())
self.assertEqual(SHARDED, broker.get_db_state())
# delete again
broker.delete_db(next(self.ts).internal)
self.assertTrue(broker.is_deleted())
self.assertEqual(SHARDED, broker.get_db_state())
do_revive_shard_delete(shard_ranges)
do_revive_shard_delete(shard_ranges)
@with_tempdir
def test_set_sharding_state(self, tempdir):
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(db_path, account='a', container='c',
logger=debug_logger())
broker.initialize(next(self.ts).internal, 0)
broker.merge_items([{'name': 'obj_%d' % i,
'created_at': next(self.ts).normal,
'content_type': 'text/plain',
'etag': 'etag_%d' % i,
'size': 1024 * i,
'deleted': 0,
'storage_policy_index': 0,
} for i in range(1, 6)])
broker.set_x_container_sync_points(1, 2)
broker.update_reconciler_sync(3)
self.assertEqual(3, broker.get_reconciler_sync())
broker.reported(next(self.ts).internal, next(self.ts).internal,
next(self.ts).internal, next(self.ts).internal)
epoch = next(self.ts)
broker.enable_sharding(epoch)
self.assertEqual(UNSHARDED, broker.get_db_state())
self.assertFalse(broker.is_deleted())
retiring_info = broker.get_info()
self.assertEqual(1, len(broker.db_files))
self.assertTrue(broker.set_sharding_state())
broker = ContainerBroker(db_path, account='a', container='c',
logger=debug_logger())
self.assertEqual(SHARDING, broker.get_db_state())
fresh_info = broker.get_info()
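        # reported timestamps and stats are reset to defaults in the fresh db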
for key in ('reported_put_timestamp', 'reported_delete_timestamp'):
retiring_info.pop(key)
self.assertEqual('0', fresh_info.pop(key), key)
for key in ('reported_object_count', 'reported_bytes_used'):
retiring_info.pop(key)
self.assertEqual(0, fresh_info.pop(key), key)
self.assertNotEqual(retiring_info.pop('id'), fresh_info.pop('id'))
self.assertNotEqual(retiring_info.pop('hash'), fresh_info.pop('hash'))
self.assertNotEqual(retiring_info.pop('x_container_sync_point1'),
fresh_info.pop('x_container_sync_point1'))
self.assertNotEqual(retiring_info.pop('x_container_sync_point2'),
fresh_info.pop('x_container_sync_point2'))
self.assertEqual(-1, broker.get_reconciler_sync())
self.assertEqual('unsharded', retiring_info.pop('db_state'))
self.assertEqual('sharding', fresh_info.pop('db_state'))
self.assertEqual(retiring_info, fresh_info)
self.assertFalse(broker.is_deleted())
self.assertEqual(2, len(broker.db_files))
self.assertEqual(db_path, broker.db_files[0])
fresh_db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash',
'container_%s.db' % epoch.internal)
self.assertEqual(fresh_db_path, broker.db_files[1])
@with_tempdir
def test_set_sharding_state_deleted(self, tempdir):
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(db_path, account='a', container='c',
logger=debug_logger())
broker.initialize(next(self.ts).internal, 0)
broker.set_x_container_sync_points(1, 2)
broker.update_reconciler_sync(3)
self.assertEqual(3, broker.get_reconciler_sync())
broker.reported(next(self.ts).internal, next(self.ts).internal,
next(self.ts).internal, next(self.ts).internal)
epoch = next(self.ts)
broker.enable_sharding(epoch)
self.assertEqual(UNSHARDED, broker.get_db_state())
broker.delete_db(next(self.ts).internal)
self.assertTrue(broker.is_deleted())
retiring_info = broker.get_info()
self.assertEqual("DELETED", retiring_info['status'])
self.assertEqual(1, len(broker.db_files))
self.assertTrue(broker.set_sharding_state())
broker = ContainerBroker(db_path, account='a', container='c',
logger=debug_logger())
self.assertEqual(SHARDING, broker.get_db_state())
fresh_info = broker.get_info()
for key in ('reported_put_timestamp', 'reported_delete_timestamp'):
retiring_info.pop(key)
self.assertEqual('0', fresh_info.pop(key), key)
for key in ('reported_object_count', 'reported_bytes_used'):
retiring_info.pop(key)
self.assertEqual(0, fresh_info.pop(key), key)
self.assertNotEqual(retiring_info.pop('id'), fresh_info.pop('id'))
self.assertNotEqual(retiring_info.pop('x_container_sync_point1'),
fresh_info.pop('x_container_sync_point1'))
self.assertNotEqual(retiring_info.pop('x_container_sync_point2'),
fresh_info.pop('x_container_sync_point2'))
self.assertEqual(-1, broker.get_reconciler_sync())
self.assertEqual('unsharded', retiring_info.pop('db_state'))
self.assertEqual('sharding', fresh_info.pop('db_state'))
self.assertEqual(retiring_info, fresh_info)
self.assertTrue(broker.is_deleted())
self.assertEqual(2, len(broker.db_files))
self.assertEqual(db_path, broker.db_files[0])
fresh_db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash',
'container_%s.db' % epoch.internal)
self.assertEqual(fresh_db_path, broker.db_files[1])
@with_tempdir
def test_set_sharding_state_errors(self, tempdir):
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(db_path, account='a', container='c',
logger=debug_logger())
broker.initialize(next(self.ts).internal, 0)
broker.enable_sharding(next(self.ts))
orig_execute = GreenDBConnection.execute
trigger = 'INSERT into object'
def mock_execute(conn, *args, **kwargs):
if trigger in args[0]:
raise sqlite3.OperationalError()
return orig_execute(conn, *args, **kwargs)
with mock.patch('swift.common.db.GreenDBConnection.execute',
mock_execute):
res = broker.set_sharding_state()
self.assertFalse(res)
lines = broker.logger.get_lines_for_level('error')
self.assertIn('Failed to set the ROWID', lines[0])
self.assertFalse(lines[1:])
broker.logger.clear()
trigger = 'UPDATE container_stat SET created_at'
with mock.patch('swift.common.db.GreenDBConnection.execute',
mock_execute):
res = broker.set_sharding_state()
self.assertFalse(res)
lines = broker.logger.get_lines_for_level('error')
self.assertIn(
'Failed to sync the container_stat table/view with the fresh '
'database', lines[0])
self.assertFalse(lines[1:])
@with_tempdir
def test_set_sharded_state_errors(self, tempdir):
retiring_db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(retiring_db_path, account='a', container='c',
logger=debug_logger())
broker.initialize(next(self.ts).internal, 0)
pre_epoch = next(self.ts)
broker.enable_sharding(next(self.ts))
self.assertTrue(broker.set_sharding_state())
# unlink fails
with mock.patch('os.unlink', side_effect=OSError(errno.EPERM)):
self.assertFalse(broker.set_sharded_state())
lines = broker.logger.get_lines_for_level('error')
self.assertIn('Failed to unlink', lines[0])
self.assertFalse(lines[1:])
self.assertFalse(broker.logger.get_lines_for_level('warning'))
self.assertTrue(os.path.exists(retiring_db_path))
self.assertTrue(os.path.exists(broker.db_file))
# extra files
extra_filename = make_db_file_path(broker.db_file, pre_epoch)
self.assertNotEqual(extra_filename, broker.db_file) # sanity check
with open(extra_filename, 'wb'):
pass
broker.logger.clear()
self.assertFalse(broker.set_sharded_state())
lines = broker.logger.get_lines_for_level('warning')
self.assertIn('Still have multiple db files', lines[0])
self.assertFalse(lines[1:])
self.assertFalse(broker.logger.get_lines_for_level('error'))
self.assertTrue(os.path.exists(retiring_db_path))
self.assertTrue(os.path.exists(broker.db_file))
# retiring file missing
broker.logger.clear()
os.unlink(retiring_db_path)
self.assertFalse(broker.set_sharded_state())
lines = broker.logger.get_lines_for_level('warning')
self.assertIn('Refusing to delete', lines[0])
self.assertFalse(lines[1:])
self.assertFalse(broker.logger.get_lines_for_level('error'))
self.assertTrue(os.path.exists(broker.db_file))
@with_tempdir
def test_get_brokers(self, tempdir):
retiring_db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(retiring_db_path, account='a', container='c',
logger=debug_logger())
broker.initialize(next(self.ts).internal, 0)
brokers = broker.get_brokers()
self.assertEqual(retiring_db_path, brokers[0].db_file)
self.assertFalse(brokers[0].skip_commits)
self.assertFalse(brokers[1:])
broker.enable_sharding(next(self.ts))
self.assertTrue(broker.set_sharding_state())
brokers = broker.get_brokers()
self.assertEqual(retiring_db_path, brokers[0].db_file)
self.assertTrue(brokers[0].skip_commits)
self.assertEqual(broker.db_file, brokers[1].db_file)
self.assertFalse(brokers[1].skip_commits)
self.assertFalse(brokers[2:])
# same outcome when called on retiring db broker
brokers = brokers[0].get_brokers()
self.assertEqual(retiring_db_path, brokers[0].db_file)
self.assertTrue(brokers[0].skip_commits)
self.assertEqual(broker.db_file, brokers[1].db_file)
self.assertFalse(brokers[1].skip_commits)
self.assertFalse(brokers[2:])
self.assertTrue(broker.set_sharded_state())
brokers = broker.get_brokers()
self.assertEqual(broker.db_file, brokers[0].db_file)
self.assertFalse(brokers[0].skip_commits)
self.assertFalse(brokers[1:])
# unexpected extra file should be ignored
with open(retiring_db_path, 'wb'):
pass
retiring_db_path = broker.db_file
broker.enable_sharding(next(self.ts))
self.assertTrue(broker.set_sharding_state())
broker.reload_db_files()
self.assertEqual(3, len(broker.db_files)) # sanity check
brokers = broker.get_brokers()
self.assertEqual(retiring_db_path, brokers[0].db_file)
self.assertTrue(brokers[0].skip_commits)
self.assertEqual(broker.db_file, brokers[1].db_file)
self.assertFalse(brokers[1].skip_commits)
self.assertFalse(brokers[2:])
lines = broker.logger.get_lines_for_level('warning')
self.assertIn('Unexpected db files', lines[0])
self.assertFalse(lines[1:])
@with_tempdir
def test_merge_shard_ranges(self, tempdir):
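        # pre-allocate an ordered pool of timestamps; a lower index is older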
ts = [next(self.ts) for _ in range(16)]
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(
db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
# sanity check
self.assertFalse(broker.get_shard_ranges(include_deleted=True))
broker.merge_shard_ranges(None)
self.assertFalse(broker.get_shard_ranges(include_deleted=True))
# merge item at ts1
# sr_<upper>_<created ts>_<meta ts>
sr_b_1_1 = ShardRange('a/c_b', ts[1], lower='a', upper='b',
object_count=2)
broker.merge_shard_ranges([sr_b_1_1])
self._assert_shard_ranges(broker, [sr_b_1_1])
# merge older item - ignored
sr_b_0_0 = ShardRange('a/c_b', ts[0], lower='a', upper='b',
object_count=1)
broker.merge_shard_ranges([sr_b_0_0])
self._assert_shard_ranges(broker, [sr_b_1_1])
# merge same timestamp - ignored
broker.merge_shard_ranges([dict(sr_b_1_1, lower='', upper='c')])
self._assert_shard_ranges(broker, [sr_b_1_1])
broker.merge_shard_ranges([dict(sr_b_1_1, object_count=99)])
self._assert_shard_ranges(broker, [sr_b_1_1])
# merge list with older item *after* newer item
sr_c_2_2 = ShardRange('a/c_c', ts[2], lower='b', upper='c',
object_count=3)
sr_c_3_3 = ShardRange('a/c_c', ts[3], lower='b', upper='c',
object_count=4)
broker.merge_shard_ranges([sr_c_3_3, sr_c_2_2])
self._assert_shard_ranges(broker, [sr_b_1_1, sr_c_3_3])
# merge newer item - updated
sr_c_5_5 = ShardRange('a/c_c', ts[5], lower='b', upper='c',
object_count=5)
broker.merge_shard_ranges([sr_c_5_5])
self._assert_shard_ranges(broker, [sr_b_1_1, sr_c_5_5])
# merge older metadata item - ignored
sr_c_5_4 = ShardRange('a/c_c', ts[5], lower='b', upper='c',
object_count=6, meta_timestamp=ts[4])
broker.merge_shard_ranges([sr_c_5_4])
self._assert_shard_ranges(broker, [sr_b_1_1, sr_c_5_5])
# merge newer metadata item - only metadata is updated
sr_c_5_6 = ShardRange('a/c_c', ts[5], lower='b', upper='c',
object_count=7, meta_timestamp=ts[6])
broker.merge_shard_ranges([dict(sr_c_5_6, lower='', upper='d')])
self._assert_shard_ranges(broker, [sr_b_1_1, sr_c_5_6])
# merge older created_at, newer metadata item - ignored
sr_c_4_7 = ShardRange('a/c_c', ts[4], lower='b', upper='c',
object_count=8, meta_timestamp=ts[7])
broker.merge_shard_ranges([sr_c_4_7])
self._assert_shard_ranges(broker, [sr_b_1_1, sr_c_5_6])
# merge list with older metadata item *after* newer metadata item
sr_c_5_11 = ShardRange('a/c_c', ts[5], lower='b', upper='c',
object_count=9, meta_timestamp=ts[11])
broker.merge_shard_ranges([sr_c_5_11, sr_c_5_6])
self._assert_shard_ranges(broker, [sr_b_1_1, sr_c_5_11])
# deleted item at *same timestamp* as existing - deleted ignored
broker.merge_shard_ranges([dict(sr_b_1_1, deleted=1, object_count=0)])
self._assert_shard_ranges(broker, [sr_b_1_1, sr_c_5_11])
sr_b_1_1.meta_timestamp = ts[11]
broker.merge_shard_ranges([dict(sr_b_1_1, deleted=1)])
self._assert_shard_ranges(broker, [sr_b_1_1, sr_c_5_11])
sr_b_1_1.state_timestamp = ts[11]
broker.merge_shard_ranges([dict(sr_b_1_1, deleted=1)])
self._assert_shard_ranges(broker, [sr_b_1_1, sr_c_5_11])
# delete item at *newer timestamp* - updated
sr_b_2_2_deleted = ShardRange('a/c_b', ts[2], lower='a', upper='b',
object_count=0, deleted=1)
broker.merge_shard_ranges([sr_b_2_2_deleted])
self._assert_shard_ranges(broker, [sr_b_2_2_deleted, sr_c_5_11])
# merge list with older undeleted item *after* newer deleted item
# NB deleted timestamp trumps newer meta timestamp
sr_c_9_12 = ShardRange('a/c_c', ts[9], lower='b', upper='c',
object_count=10, meta_timestamp=ts[12])
sr_c_10_10_deleted = ShardRange('a/c_c', ts[10], lower='b', upper='c',
object_count=0, deleted=1)
broker.merge_shard_ranges([sr_c_10_10_deleted, sr_c_9_12])
self._assert_shard_ranges(
broker, [sr_b_2_2_deleted, sr_c_10_10_deleted])
# merge a ShardRangeList
sr_b_13 = ShardRange('a/c_b', ts[13], lower='a', upper='b',
object_count=10, meta_timestamp=ts[13])
sr_c_13 = ShardRange('a/c_c', ts[13], lower='b', upper='c',
object_count=10, meta_timestamp=ts[13])
broker.merge_shard_ranges(ShardRangeList([sr_c_13, sr_b_13]))
self._assert_shard_ranges(
broker, [sr_b_13, sr_c_13])
# merge with tombstones but same meta_timestamp
sr_c_13_tombs = ShardRange('a/c_c', ts[13], lower='b', upper='c',
object_count=10, meta_timestamp=ts[13],
tombstones=999)
broker.merge_shard_ranges(sr_c_13_tombs)
self._assert_shard_ranges(
broker, [sr_b_13, sr_c_13])
# merge with tombstones at newer meta_timestamp
sr_c_13_tombs = ShardRange('a/c_c', ts[13], lower='b', upper='c',
object_count=1, meta_timestamp=ts[14],
tombstones=999)
broker.merge_shard_ranges(sr_c_13_tombs)
self._assert_shard_ranges(
broker, [sr_b_13, sr_c_13_tombs])
@with_tempdir
def test_merge_shard_ranges_state(self, tempdir):
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
expected_shard_ranges = []
def do_test(orig_state, orig_timestamp, test_state, test_timestamp,
expected_state, expected_timestamp):
index = len(expected_shard_ranges)
sr = ShardRange('a/%s' % index, orig_timestamp, '%03d' % index,
'%03d' % (index + 1), state=orig_state)
broker.merge_shard_ranges([sr])
sr.state = test_state
sr.state_timestamp = test_timestamp
broker.merge_shard_ranges([sr])
sr.state = expected_state
sr.state_timestamp = expected_timestamp
expected_shard_ranges.append(sr)
self._assert_shard_ranges(broker, expected_shard_ranges)
# state at older state_timestamp is not merged
for orig_state in ShardRange.STATES:
for test_state in ShardRange.STATES:
ts_older = next(self.ts)
ts = next(self.ts)
do_test(orig_state, ts, test_state, ts_older, orig_state, ts)
# more advanced state at same timestamp is merged
for orig_state in ShardRange.STATES:
for test_state in ShardRange.STATES:
ts = next(self.ts)
do_test(orig_state, ts, test_state, ts,
test_state if test_state > orig_state else orig_state,
ts)
# any state at newer timestamp is merged
for orig_state in ShardRange.STATES:
for test_state in ShardRange.STATES:
ts = next(self.ts)
ts_newer = next(self.ts)
do_test(orig_state, ts, test_state, ts_newer, test_state,
ts_newer)
def _check_object_stats_when_old_style_sharded(
self, a, c, root_a, root_c, tempdir):
# common setup and assertions for root and shard containers
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(
db_path, account=a, container=c)
broker.initialize(next(self.ts).internal, 0)
broker.set_sharding_sysmeta('Root', '%s/%s' % (root_a, root_c))
broker.merge_items([{'name': 'obj', 'size': 14, 'etag': 'blah',
'content_type': 'text/plain', 'deleted': 0,
'created_at': Timestamp.now().internal}])
self.assertEqual(1, broker.get_info()['object_count'])
self.assertEqual(14, broker.get_info()['bytes_used'])
broker.enable_sharding(next(self.ts))
self.assertTrue(broker.set_sharding_state())
sr_1 = ShardRange(
'%s/%s1' % (root_a, root_c), Timestamp.now(), lower='', upper='m',
object_count=99, bytes_used=999, state=ShardRange.ACTIVE)
sr_2 = ShardRange(
'%s/%s2' % (root_a, root_c), Timestamp.now(), lower='m', upper='',
object_count=21, bytes_used=1000, state=ShardRange.ACTIVE)
broker.merge_shard_ranges([sr_1, sr_2])
self.assertEqual(1, broker.get_info()['object_count'])
self.assertEqual(14, broker.get_info()['bytes_used'])
return broker
@with_tempdir
def test_object_stats_old_style_root_container(self, tempdir):
broker = self._check_object_stats_when_old_style_sharded(
'a', 'c', 'a', 'c', tempdir)
self.assertTrue(broker.is_root_container()) # sanity
self.assertTrue(broker.set_sharded_state())
self.assertEqual(120, broker.get_info()['object_count'])
self.assertEqual(1999, broker.get_info()['bytes_used'])
@with_tempdir
def test_object_stats_old_style_shard_container(self, tempdir):
broker = self._check_object_stats_when_old_style_sharded(
'.shard_a', 'c-blah', 'a', 'c', tempdir)
self.assertFalse(broker.is_root_container()) # sanity
self.assertTrue(broker.set_sharded_state())
self.assertEqual(0, broker.get_info()['object_count'])
self.assertEqual(0, broker.get_info()['bytes_used'])
def _check_object_stats_when_sharded(self, a, c, root_a, root_c, tempdir):
# common setup and assertions for root and shard containers
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
broker = ContainerBroker(
db_path, account=a, container=c)
broker.initialize(next(self.ts).internal, 0)
broker.set_sharding_sysmeta('Quoted-Root', '%s/%s' % (root_a, root_c))
broker.merge_items([{'name': 'obj', 'size': 14, 'etag': 'blah',
'content_type': 'text/plain', 'deleted': 0,
'created_at': Timestamp.now().internal}])
self.assertEqual(1, broker.get_info()['object_count'])
self.assertEqual(14, broker.get_info()['bytes_used'])
broker.enable_sharding(next(self.ts))
self.assertTrue(broker.set_sharding_state())
sr_1 = ShardRange(
'%s/%s1' % (root_a, root_c), Timestamp.now(), lower='', upper='m',
object_count=99, bytes_used=999, state=ShardRange.ACTIVE)
sr_2 = ShardRange(
'%s/%s2' % (root_a, root_c), Timestamp.now(), lower='m', upper='',
object_count=21, bytes_used=1000, state=ShardRange.ACTIVE)
broker.merge_shard_ranges([sr_1, sr_2])
self.assertEqual(1, broker.get_info()['object_count'])
self.assertEqual(14, broker.get_info()['bytes_used'])
return broker
@with_tempdir
def test_object_stats_root_container(self, tempdir):
broker = self._check_object_stats_when_sharded(
'a', 'c', 'a', 'c', tempdir)
self.assertTrue(broker.is_root_container()) # sanity
self.assertTrue(broker.set_sharded_state())
self.assertEqual(120, broker.get_info()['object_count'])
self.assertEqual(1999, broker.get_info()['bytes_used'])
@with_tempdir
def test_object_stats_shard_container(self, tempdir):
broker = self._check_object_stats_when_sharded(
'.shard_a', 'c-blah', 'a', 'c', tempdir)
self.assertFalse(broker.is_root_container()) # sanity
self.assertTrue(broker.set_sharded_state())
self.assertEqual(0, broker.get_info()['object_count'])
self.assertEqual(0, broker.get_info()['bytes_used'])
class TestCommonContainerBroker(test_db.TestExampleBroker):
broker_class = ContainerBroker
server_type = 'container'
def setUp(self):
super(TestCommonContainerBroker, self).setUp()
self.policy = random.choice(list(POLICIES))
def put_item(self, broker, timestamp):
broker.put_object('test', timestamp, 0, 'text/plain', 'x',
storage_policy_index=int(self.policy))
def delete_item(self, broker, timestamp):
broker.delete_object('test', timestamp,
storage_policy_index=int(self.policy))
class ContainerBrokerMigrationMixin(test_db.TestDbBase):
"""
Mixin for running ContainerBroker against databases created with
older schemas.
"""
class OverrideCreateShardRangesTable(object):
def __init__(self, func):
self.func = func
def __get__(self, obj, obj_type):
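            # suppress shard_range table creation only when called from
            # _initialize, so new databases mimic an older schema; any
            # later explicit call still gets the real method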
if inspect.stack()[1][3] == '_initialize':
return lambda *a, **kw: None
return self.func.__get__(obj, obj_type)
def setUp(self):
super(ContainerBrokerMigrationMixin, self).setUp()
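        # patch in schema-creation methods from older releases so that new
        # brokers are initialized with a legacy schema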
self._imported_create_object_table = \
ContainerBroker.create_object_table
ContainerBroker.create_object_table = \
prespi_create_object_table
self._imported_create_container_info_table = \
ContainerBroker.create_container_info_table
ContainerBroker.create_container_info_table = \
premetadata_create_container_info_table
self._imported_create_policy_stat_table = \
ContainerBroker.create_policy_stat_table
ContainerBroker.create_policy_stat_table = lambda *args: None
self._imported_create_shard_range_table = \
ContainerBroker.create_shard_range_table
if 'shard_range' not in self.expected_db_tables:
ContainerBroker.create_shard_range_table = \
self.OverrideCreateShardRangesTable(
ContainerBroker.create_shard_range_table)
self.ts = make_timestamp_iter()
@classmethod
@contextmanager
def old_broker(cls):
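        # TestCase() needs a runTest method in order to be instantiated;
        # stub one in so the mixin's setUp/tearDown can be driven directly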
cls.runTest = lambda *a, **k: None
case = cls()
case.setUp()
try:
yield ContainerBroker
finally:
case.tearDown()
def tearDown(self):
ContainerBroker.create_container_info_table = \
self._imported_create_container_info_table
ContainerBroker.create_object_table = \
self._imported_create_object_table
ContainerBroker.create_shard_range_table = \
self._imported_create_shard_range_table
ContainerBroker.create_policy_stat_table = \
self._imported_create_policy_stat_table
        # Note: subclasses are responsible for cleaning up self.tempdir by
        # explicitly calling test_db.TestDbBase.tearDown after their own
        # post-test checks.
def premetadata_create_container_info_table(self, conn, put_timestamp,
_spi=None):
"""
Copied from ContainerBroker before the metadata column was
added; used for testing with TestContainerBrokerBeforeMetadata.
Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
if put_timestamp is None:
put_timestamp = Timestamp(0).internal
conn.executescript('''
CREATE TABLE container_stat (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
object_count INTEGER,
bytes_used INTEGER,
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0'
);
INSERT INTO container_stat (object_count, bytes_used)
VALUES (0, 0);
''')
conn.execute('''
UPDATE container_stat
SET account = ?, container = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, self.container, Timestamp.now().internal,
str(uuid4()), put_timestamp))
class TestContainerBrokerBeforeMetadata(ContainerBrokerMigrationMixin,
TestContainerBroker):
"""
Tests for ContainerBroker against databases created before
the metadata column was added.
"""
expected_db_tables = {'outgoing_sync', 'incoming_sync', 'object',
'sqlite_sequence', 'container_stat'}
def setUp(self):
super(TestContainerBrokerBeforeMetadata, self).setUp()
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
exc = None
with broker.get() as conn:
try:
conn.execute('SELECT metadata FROM container_stat')
except BaseException as err:
exc = err
        self.assertIn('no such column: metadata', str(exc))
def tearDown(self):
super(TestContainerBrokerBeforeMetadata, self).tearDown()
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
with broker.get() as conn:
conn.execute('SELECT metadata FROM container_stat')
test_db.TestDbBase.tearDown(self)
def prexsync_create_container_info_table(self, conn, put_timestamp,
_spi=None):
"""
Copied from ContainerBroker before the
x_container_sync_point[12] columns were added; used for testing with
TestContainerBrokerBeforeXSync.
Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
if put_timestamp is None:
put_timestamp = Timestamp(0).internal
conn.executescript("""
CREATE TABLE container_stat (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
object_count INTEGER,
bytes_used INTEGER,
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0',
metadata TEXT DEFAULT ''
);
INSERT INTO container_stat (object_count, bytes_used)
VALUES (0, 0);
""")
conn.execute('''
UPDATE container_stat
SET account = ?, container = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, self.container, Timestamp.now().internal,
str(uuid4()), put_timestamp))
class TestContainerBrokerBeforeXSync(ContainerBrokerMigrationMixin,
TestContainerBroker):
"""
Tests for ContainerBroker against databases created
before the x_container_sync_point[12] columns were added.
"""
expected_db_tables = {'outgoing_sync', 'incoming_sync', 'object',
'sqlite_sequence', 'container_stat'}
def setUp(self):
super(TestContainerBrokerBeforeXSync, self).setUp()
ContainerBroker.create_container_info_table = \
prexsync_create_container_info_table
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
exc = None
with broker.get() as conn:
try:
conn.execute('''SELECT x_container_sync_point1
FROM container_stat''')
except BaseException as err:
exc = err
        self.assertIn('no such column: x_container_sync_point1', str(exc))
def tearDown(self):
super(TestContainerBrokerBeforeXSync, self).tearDown()
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
with broker.get() as conn:
conn.execute('SELECT x_container_sync_point1 FROM container_stat')
test_db.TestDbBase.tearDown(self)
def prespi_create_object_table(self, conn, *args, **kwargs):
conn.executescript("""
CREATE TABLE object (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
created_at TEXT,
size INTEGER,
content_type TEXT,
etag TEXT,
deleted INTEGER DEFAULT 0
);
CREATE INDEX ix_object_deleted_name ON object (deleted, name);
CREATE TRIGGER object_insert AFTER INSERT ON object
BEGIN
UPDATE container_stat
SET object_count = object_count + (1 - new.deleted),
bytes_used = bytes_used + new.size,
hash = chexor(hash, new.name, new.created_at);
END;
CREATE TRIGGER object_update BEFORE UPDATE ON object
BEGIN
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
END;
CREATE TRIGGER object_delete AFTER DELETE ON object
BEGIN
UPDATE container_stat
SET object_count = object_count - (1 - old.deleted),
bytes_used = bytes_used - old.size,
hash = chexor(hash, old.name, old.created_at);
END;
""")
def prespi_create_container_info_table(self, conn, put_timestamp,
_spi=None):
"""
Copied from ContainerBroker before the
storage_policy_index column was added; used for testing with
TestContainerBrokerBeforeSPI.
Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
if put_timestamp is None:
put_timestamp = Timestamp(0).internal
conn.executescript("""
CREATE TABLE container_stat (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
object_count INTEGER,
bytes_used INTEGER,
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0',
metadata TEXT DEFAULT '',
x_container_sync_point1 INTEGER DEFAULT -1,
x_container_sync_point2 INTEGER DEFAULT -1
);
INSERT INTO container_stat (object_count, bytes_used)
VALUES (0, 0);
""")
conn.execute('''
UPDATE container_stat
SET account = ?, container = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, self.container, Timestamp.now().internal,
str(uuid4()), put_timestamp))
class TestContainerBrokerBeforeSPI(ContainerBrokerMigrationMixin,
TestContainerBroker):
"""
Tests for ContainerBroker against databases created
before the storage_policy_index column was added.
"""
expected_db_tables = {'outgoing_sync', 'incoming_sync', 'object',
'sqlite_sequence', 'container_stat'}
def setUp(self):
super(TestContainerBrokerBeforeSPI, self).setUp()
ContainerBroker.create_container_info_table = \
prespi_create_container_info_table
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
with self.assertRaises(sqlite3.DatabaseError) as raised, \
broker.get() as conn:
conn.execute('''SELECT storage_policy_index
FROM container_stat''')
self.assertIn('no such column: storage_policy_index',
str(raised.exception))
def tearDown(self):
super(TestContainerBrokerBeforeSPI, self).tearDown()
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
with broker.get() as conn:
conn.execute('SELECT storage_policy_index FROM container_stat')
test_db.TestDbBase.tearDown(self)
@patch_policies
@with_tempdir
def test_object_table_migration(self, tempdir):
db_path = os.path.join(tempdir, 'container.db')
# initialize an un-migrated database
broker = ContainerBroker(db_path, account='a', container='c')
put_timestamp = Timestamp(int(time())).internal
broker.initialize(put_timestamp, None)
with broker.get() as conn:
try:
conn.execute('''
SELECT storage_policy_index FROM object
''').fetchone()[0]
except sqlite3.OperationalError as err:
# confirm that the table doesn't have this column
self.assertTrue('no such column: storage_policy_index' in
str(err))
else:
self.fail('broker did not raise sqlite3.OperationalError '
'trying to select from storage_policy_index '
'from object table!')
# manually insert an existing row to avoid automatic migration
obj_put_timestamp = Timestamp.now().internal
with broker.get() as conn:
conn.execute('''
INSERT INTO object (name, created_at, size,
content_type, etag, deleted)
VALUES (?, ?, ?, ?, ?, ?)
''', ('test_name', obj_put_timestamp, 123,
'text/plain', '8f4c680e75ca4c81dc1917ddab0a0b5c', 0))
conn.commit()
# make sure we can iter objects without performing migration
for o in broker.list_objects_iter(1, None, None, None, None):
self.assertEqual(o, ('test_name', obj_put_timestamp, 123,
'text/plain',
'8f4c680e75ca4c81dc1917ddab0a0b5c'))
# get_info
info = broker.get_info()
expected = {
'account': 'a',
'container': 'c',
'put_timestamp': put_timestamp,
'delete_timestamp': '0',
'status_changed_at': '0',
'bytes_used': 123,
'object_count': 1,
'reported_put_timestamp': '0',
'reported_delete_timestamp': '0',
'reported_object_count': 0,
'reported_bytes_used': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1,
'storage_policy_index': 0,
}
for k, v in expected.items():
self.assertEqual(info[k], v,
'The value for %s was %r not %r' % (
k, info[k], v))
self.assertTrue(
Timestamp(info['created_at']) > Timestamp(put_timestamp))
self.assertNotEqual(int(info['hash'], 16), 0)
orig_hash = info['hash']
# get_replication_info
info = broker.get_replication_info()
# translate object count for replicators
expected['count'] = expected.pop('object_count')
for k, v in expected.items():
self.assertEqual(info[k], v)
self.assertTrue(
Timestamp(info['created_at']) > Timestamp(put_timestamp))
self.assertEqual(info['hash'], orig_hash)
self.assertEqual(info['max_row'], 1)
self.assertEqual(info['metadata'], '')
# get_policy_stats
info = broker.get_policy_stats()
expected = {
0: {'bytes_used': 123, 'object_count': 1}
}
self.assertEqual(info, expected)
# empty & is_deleted
self.assertEqual(broker.empty(), False)
self.assertEqual(broker.is_deleted(), False)
# no migrations have occurred yet
# container_stat table
with broker.get() as conn:
try:
conn.execute('''
SELECT storage_policy_index FROM container_stat
''').fetchone()[0]
except sqlite3.OperationalError as err:
# confirm that the table doesn't have this column
self.assertTrue('no such column: storage_policy_index' in
str(err))
else:
self.fail('broker did not raise sqlite3.OperationalError '
'trying to select from storage_policy_index '
'from container_stat table!')
# object table
with broker.get() as conn:
try:
conn.execute('''
SELECT storage_policy_index FROM object
''').fetchone()[0]
except sqlite3.OperationalError as err:
# confirm that the table doesn't have this column
self.assertTrue('no such column: storage_policy_index' in
str(err))
else:
self.fail('broker did not raise sqlite3.OperationalError '
'trying to select from storage_policy_index '
'from object table!')
# policy_stat table
with broker.get() as conn:
try:
conn.execute('''
SELECT storage_policy_index FROM policy_stat
''').fetchone()[0]
except sqlite3.OperationalError as err:
# confirm that the table does not exist yet
self.assertTrue('no such table: policy_stat' in str(err))
else:
self.fail('broker did not raise sqlite3.OperationalError '
'trying to select from storage_policy_index '
'from policy_stat table!')
# now do a PUT with a different value for storage_policy_index
# which will update the DB schema as well as update policy_stats
# for legacy objects in the DB (those without an SPI)
second_object_put_timestamp = Timestamp.now().internal
other_policy = [p for p in POLICIES if p.idx != 0][0]
broker.put_object('test_second', second_object_put_timestamp,
456, 'text/plain',
'cbac50c175793513fa3c581551c876ab',
storage_policy_index=other_policy.idx)
broker._commit_puts_stale_ok()
# we are fully migrated and both objects have their
# storage_policy_index
with broker.get() as conn:
storage_policy_index = conn.execute('''
SELECT storage_policy_index FROM container_stat
''').fetchone()[0]
self.assertEqual(storage_policy_index, 0)
rows = conn.execute('''
SELECT name, storage_policy_index FROM object
''').fetchall()
for row in rows:
if row[0] == 'test_name':
self.assertEqual(row[1], 0)
else:
self.assertEqual(row[1], other_policy.idx)
# and all stats tracking is in place
stats = broker.get_policy_stats()
self.assertEqual(len(stats), 2)
self.assertEqual(stats[0]['object_count'], 1)
self.assertEqual(stats[0]['bytes_used'], 123)
self.assertEqual(stats[other_policy.idx]['object_count'], 1)
self.assertEqual(stats[other_policy.idx]['bytes_used'], 456)
# get info still reports on the legacy storage policy
info = broker.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 123)
# unless you change the storage policy
broker.set_storage_policy_index(other_policy.idx)
info = broker.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 456)
class TestContainerBrokerBeforeShardRanges(ContainerBrokerMigrationMixin,
TestContainerBroker):
"""
Tests for ContainerBroker against databases created
before the shard_ranges table was added.
"""
# *grumble grumble* This should include container_info/policy_stat :-/
expected_db_tables = {'outgoing_sync', 'incoming_sync', 'object',
'sqlite_sequence', 'container_stat'}
def setUp(self):
super(TestContainerBrokerBeforeShardRanges, self).setUp()
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
with self.assertRaises(sqlite3.DatabaseError) as raised, \
broker.get() as conn:
conn.execute('''SELECT *
FROM shard_range''')
self.assertIn('no such table: shard_range', str(raised.exception))
def tearDown(self):
super(TestContainerBrokerBeforeShardRanges, self).tearDown()
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
with broker.get() as conn:
conn.execute('''SELECT *
FROM shard_range''')
test_db.TestDbBase.tearDown(self)
def pre_reported_create_shard_range_table(self, conn):
"""
Copied from ContainerBroker before the
reported column was added; used for testing with
TestContainerBrokerBeforeShardRangeReportedColumn.
Create a shard_range table with no 'reported' column.
:param conn: DB connection object
"""
conn.execute("""
CREATE TABLE shard_range (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
timestamp TEXT,
lower TEXT,
upper TEXT,
object_count INTEGER DEFAULT 0,
bytes_used INTEGER DEFAULT 0,
meta_timestamp TEXT,
deleted INTEGER DEFAULT 0,
state INTEGER,
state_timestamp TEXT,
epoch TEXT
);
""")
conn.execute("""
CREATE TRIGGER shard_range_update BEFORE UPDATE ON shard_range
BEGIN
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
END;
""")
class TestContainerBrokerBeforeShardRangeReportedColumn(
ContainerBrokerMigrationMixin, TestContainerBroker):
"""
Tests for ContainerBroker against databases created
before the shard_ranges table reported column was added.
"""
# *grumble grumble* This should include container_info/policy_stat :-/
expected_db_tables = {'outgoing_sync', 'incoming_sync', 'object',
'sqlite_sequence', 'container_stat', 'shard_range'}
def setUp(self):
super(TestContainerBrokerBeforeShardRangeReportedColumn,
self).setUp()
ContainerBroker.create_shard_range_table = \
pre_reported_create_shard_range_table
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
with self.assertRaises(sqlite3.DatabaseError) as raised, \
broker.get() as conn:
conn.execute('''SELECT reported
FROM shard_range''')
self.assertIn('no such column: reported', str(raised.exception))
def tearDown(self):
super(TestContainerBrokerBeforeShardRangeReportedColumn,
self).tearDown()
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
with broker.get() as conn:
conn.execute('''SELECT reported
FROM shard_range''')
test_db.TestDbBase.tearDown(self)
@with_tempdir
def test_get_shard_ranges_attempts(self, tempdir):
# verify that old broker handles new sql query for shard range rows
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
@contextmanager
def patch_execute():
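            # wrap the real connection in a mock so the SQL passed to
            # execute() can be captured or made to fail on demand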
with broker.get() as conn:
mock_conn = mock.MagicMock()
mock_execute = mock.MagicMock()
mock_conn.execute = mock_execute
@contextmanager
def mock_get():
yield mock_conn
with mock.patch.object(broker, 'get', mock_get):
yield mock_execute, conn
with patch_execute() as (mock_execute, conn):
mock_execute.side_effect = conn.execute
broker.get_shard_ranges()
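        # the query is retried, first substituting a default for the missing
        # 'reported' column and then also for 'tombstones'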
expected = [
mock.call('\n SELECT name, timestamp, lower, upper, '
'object_count, bytes_used, meta_timestamp, deleted, '
'state, state_timestamp, epoch, reported, '
'tombstones\n '
'FROM shard_range WHERE deleted=0 AND name != ?;\n'
' ', ['a/c']),
mock.call('\n SELECT name, timestamp, lower, upper, '
'object_count, bytes_used, meta_timestamp, deleted, '
'state, state_timestamp, epoch, 0 as reported, '
'tombstones\n '
'FROM shard_range WHERE deleted=0 AND name != ?;\n'
' ', ['a/c']),
mock.call('\n SELECT name, timestamp, lower, upper, '
'object_count, bytes_used, meta_timestamp, deleted, '
'state, state_timestamp, epoch, 0 as reported, '
'-1 as tombstones\n '
'FROM shard_range WHERE deleted=0 AND name != ?;\n'
' ', ['a/c']),
]
self.assertEqual(expected, mock_execute.call_args_list,
mock_execute.call_args_list)
# if unexpectedly the call to execute continues to fail for reported,
# verify that the exception is raised after a retry
with patch_execute() as (mock_execute, conn):
def mock_execute_handler(*args, **kwargs):
if len(mock_execute.call_args_list) < 3:
return conn.execute(*args, **kwargs)
else:
raise sqlite3.OperationalError('no such column: reported')
mock_execute.side_effect = mock_execute_handler
with self.assertRaises(sqlite3.OperationalError):
broker.get_shard_ranges()
self.assertEqual(expected, mock_execute.call_args_list,
mock_execute.call_args_list)
# if unexpectedly the call to execute continues to fail for tombstones,
# verify that the exception is raised after a retry
with patch_execute() as (mock_execute, conn):
def mock_execute_handler(*args, **kwargs):
if len(mock_execute.call_args_list) < 3:
return conn.execute(*args, **kwargs)
else:
raise sqlite3.OperationalError(
'no such column: tombstones')
mock_execute.side_effect = mock_execute_handler
with self.assertRaises(sqlite3.OperationalError):
broker.get_shard_ranges()
self.assertEqual(expected, mock_execute.call_args_list,
mock_execute.call_args_list)
@with_tempdir
def test_merge_shard_ranges_migrates_table(self, tempdir):
# verify that old broker migrates shard range table
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
shard_ranges = [ShardRange('.shards_a/c_0', next(self.ts), 'a', 'b'),
ShardRange('.shards_a/c_1', next(self.ts), 'b', 'c')]
orig_migrate_reported = broker._migrate_add_shard_range_reported
orig_migrate_tombstones = broker._migrate_add_shard_range_tombstones
with mock.patch.object(
broker, '_migrate_add_shard_range_reported',
side_effect=orig_migrate_reported) as mocked_reported:
with mock.patch.object(
broker, '_migrate_add_shard_range_tombstones',
side_effect=orig_migrate_tombstones) as mocked_tombstones:
broker.merge_shard_ranges(shard_ranges[:1])
mocked_reported.assert_called_once_with(mock.ANY)
mocked_tombstones.assert_called_once_with(mock.ANY)
self._assert_shard_ranges(broker, shard_ranges[:1])
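        # a second merge finds the migrated columns already present, so no
        # further migration is attempted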
with mock.patch.object(
broker, '_migrate_add_shard_range_reported',
side_effect=orig_migrate_reported) as mocked_reported:
with mock.patch.object(
broker, '_migrate_add_shard_range_tombstones',
side_effect=orig_migrate_tombstones) as mocked_tombstones:
broker.merge_shard_ranges(shard_ranges[1:])
mocked_reported.assert_not_called()
mocked_tombstones.assert_not_called()
self._assert_shard_ranges(broker, shard_ranges)
@with_tempdir
def test_merge_shard_ranges_fails_to_migrate_table(self, tempdir):
# verify that old broker will raise exception if it unexpectedly fails
# to migrate shard range table
db_path = os.path.join(tempdir, 'container.db')
broker = ContainerBroker(db_path, account='a', container='c')
broker.initialize(next(self.ts).internal, 0)
shard_ranges = [ShardRange('.shards_a/c_0', next(self.ts), 'a', 'b'),
ShardRange('.shards_a/c_1', next(self.ts), 'b', 'c')]
# unexpected error during migration
with mock.patch.object(
broker, '_migrate_add_shard_range_reported',
side_effect=sqlite3.OperationalError('unexpected')) \
as mocked_reported:
with self.assertRaises(sqlite3.OperationalError):
broker.merge_shard_ranges(shard_ranges)
# one failed attempt was made to add reported column
self.assertEqual(1, mocked_reported.call_count)
        # migration is mocked out as a no-op, so the retried merge still fails
with mock.patch.object(
broker, '_migrate_add_shard_range_reported') \
as mocked_reported:
with self.assertRaises(sqlite3.OperationalError):
broker.merge_shard_ranges(shard_ranges)
            # one (no-op) attempt was made to add the reported column
self.assertEqual(1, mocked_reported.call_count)
with mock.patch.object(
broker, '_migrate_add_shard_range_tombstones') \
as mocked_tombstones:
with self.assertRaises(sqlite3.OperationalError):
broker.merge_shard_ranges(shard_ranges)
# first migration adds reported column
            # one (no-op) attempt was made to add the tombstones column
self.assertEqual(1, mocked_tombstones.call_count)
def pre_tombstones_create_shard_range_table(self, conn):
"""
Copied from ContainerBroker before the
tombstones column was added; used for testing with
TestContainerBrokerBeforeShardRangeTombstonesColumn.
Create a shard_range table with no 'tombstones' column.
:param conn: DB connection object
"""
# Use execute (not executescript) so we get the benefits of our
# GreenDBConnection. Creating a table requires a whole-DB lock;
# *any* in-progress cursor will otherwise trip a "database is locked"
# error.
conn.execute("""
CREATE TABLE shard_range (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
timestamp TEXT,
lower TEXT,
upper TEXT,
object_count INTEGER DEFAULT 0,
bytes_used INTEGER DEFAULT 0,
meta_timestamp TEXT,
deleted INTEGER DEFAULT 0,
state INTEGER,
state_timestamp TEXT,
epoch TEXT,
reported INTEGER DEFAULT 0
);
""")
conn.execute("""
CREATE TRIGGER shard_range_update BEFORE UPDATE ON shard_range
BEGIN
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
END;
""")
class TestContainerBrokerBeforeShardRangeTombstonesColumn(
ContainerBrokerMigrationMixin, TestContainerBroker):
"""
Tests for ContainerBroker against databases created
before the shard_ranges table tombstones column was added.
"""
expected_db_tables = {'outgoing_sync', 'incoming_sync', 'object',
'sqlite_sequence', 'container_stat', 'shard_range'}
def setUp(self):
super(TestContainerBrokerBeforeShardRangeTombstonesColumn,
self).setUp()
ContainerBroker.create_shard_range_table = \
pre_tombstones_create_shard_range_table
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
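        # sanity check: with the old schema patched in, a freshly initialized
        # broker must not have a tombstones column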
with self.assertRaises(sqlite3.DatabaseError) as raised, \
broker.get() as conn:
conn.execute('''SELECT tombstones
FROM shard_range''')
self.assertIn('no such column: tombstones', str(raised.exception))
def tearDown(self):
super(TestContainerBrokerBeforeShardRangeTombstonesColumn,
self).tearDown()
broker = ContainerBroker(self.get_db_path(), account='a',
container='c')
broker.initialize(Timestamp('1').internal, 0)
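        # by the time a test completes, migration should have added the
        # tombstones column, so this SELECT must now succeed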
with broker.get() as conn:
conn.execute('''SELECT tombstones
FROM shard_range''')
test_db.TestDbBase.tearDown(self)
class TestUpdateNewItemFromExisting(unittest.TestCase):
# TODO: add test scenarios that have swift_bytes in content_type
t0 = '1234567890.00000'
t1 = '1234567890.00001'
t2 = '1234567890.00002'
t3 = '1234567890.00003'
t4 = '1234567890.00004'
t5 = '1234567890.00005'
t6 = '1234567890.00006'
t7 = '1234567890.00007'
t8 = '1234567890.00008'
t20 = '1234567890.00020'
t30 = '1234567890.00030'
base_new_item = {'etag': 'New_item',
'size': 'nEw_item',
'content_type': 'neW_item',
'deleted': '0'}
base_existing = {'etag': 'Existing',
'size': 'eXisting',
'content_type': 'exIsting',
'deleted': '0'}
#
# each scenario is a tuple of:
# (existing time, new item times, expected updated item)
#
# e.g.:
# existing -> ({'created_at': t5},
# new_item -> {'created_at': t, 'ctype_timestamp': t, 'meta_timestamp': t},
# expected -> {'created_at': t,
# 'etag': <val>, 'size': <val>, 'content_type': <val>})
#
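    # Note on composite timestamps: a created_at like t3 + '+2+2' encodes the
    # content-type time as a hex offset from the data time and the metadata
    # time as a hex offset from the content-type time (so t3+2+2 means data
    # at t3, ctype at t5, meta at t7).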
scenarios_when_all_existing_wins = (
#
# all new_item times <= all existing times -> existing values win
#
# existing has attrs at single time
#
({'created_at': t3},
{'created_at': t0, 'ctype_timestamp': t0, 'meta_timestamp': t0},
{'created_at': t3,
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3},
{'created_at': t0, 'ctype_timestamp': t0, 'meta_timestamp': t1},
{'created_at': t3,
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3},
{'created_at': t0, 'ctype_timestamp': t1, 'meta_timestamp': t1},
{'created_at': t3,
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3},
{'created_at': t0, 'ctype_timestamp': t1, 'meta_timestamp': t2},
{'created_at': t3,
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3},
{'created_at': t0, 'ctype_timestamp': t1, 'meta_timestamp': t3},
{'created_at': t3,
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3},
{'created_at': t0, 'ctype_timestamp': t3, 'meta_timestamp': t3},
{'created_at': t3,
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3},
{'created_at': t3, 'ctype_timestamp': t3, 'meta_timestamp': t3},
{'created_at': t3,
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
#
# existing has attrs at multiple times:
# data @ t3, ctype @ t5, meta @t7 -> existing created_at = t3+2+2
#
({'created_at': t3 + '+2+2'},
{'created_at': t0, 'ctype_timestamp': t0, 'meta_timestamp': t0},
{'created_at': t3 + '+2+2',
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3 + '+2+2'},
{'created_at': t3, 'ctype_timestamp': t3, 'meta_timestamp': t3},
{'created_at': t3 + '+2+2',
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3 + '+2+2'},
{'created_at': t3, 'ctype_timestamp': t4, 'meta_timestamp': t4},
{'created_at': t3 + '+2+2',
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3 + '+2+2'},
{'created_at': t3, 'ctype_timestamp': t4, 'meta_timestamp': t5},
{'created_at': t3 + '+2+2',
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3 + '+2+2'},
{'created_at': t3, 'ctype_timestamp': t4, 'meta_timestamp': t7},
{'created_at': t3 + '+2+2',
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3 + '+2+2'},
{'created_at': t3, 'ctype_timestamp': t4, 'meta_timestamp': t7},
{'created_at': t3 + '+2+2',
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3 + '+2+2'},
{'created_at': t3, 'ctype_timestamp': t5, 'meta_timestamp': t5},
{'created_at': t3 + '+2+2',
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3 + '+2+2'},
{'created_at': t3, 'ctype_timestamp': t5, 'meta_timestamp': t6},
{'created_at': t3 + '+2+2',
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3 + '+2+2'},
{'created_at': t3, 'ctype_timestamp': t5, 'meta_timestamp': t7},
{'created_at': t3 + '+2+2',
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
)
scenarios_when_all_new_item_wins = (
# no existing record
(None,
{'created_at': t4, 'ctype_timestamp': t4, 'meta_timestamp': t4},
{'created_at': t4,
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'neW_item'}),
(None,
{'created_at': t4, 'ctype_timestamp': t4, 'meta_timestamp': t5},
{'created_at': t4 + '+0+1',
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'neW_item'}),
(None,
{'created_at': t4, 'ctype_timestamp': t5, 'meta_timestamp': t5},
{'created_at': t4 + '+1+0',
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'neW_item'}),
(None,
{'created_at': t4, 'ctype_timestamp': t5, 'meta_timestamp': t6},
{'created_at': t4 + '+1+1',
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'neW_item'}),
#
# all new_item times > all existing times -> new item values win
#
# existing has attrs at single time
#
({'created_at': t3},
{'created_at': t4, 'ctype_timestamp': t4, 'meta_timestamp': t4},
{'created_at': t4,
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'neW_item'}),
({'created_at': t3},
{'created_at': t4, 'ctype_timestamp': t4, 'meta_timestamp': t5},
{'created_at': t4 + '+0+1',
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'neW_item'}),
({'created_at': t3},
{'created_at': t4, 'ctype_timestamp': t5, 'meta_timestamp': t5},
{'created_at': t4 + '+1+0',
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'neW_item'}),
({'created_at': t3},
{'created_at': t4, 'ctype_timestamp': t5, 'meta_timestamp': t6},
{'created_at': t4 + '+1+1',
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'neW_item'}),
#
# existing has attrs at multiple times:
# data @ t3, ctype @ t5, meta @t7 -> existing created_at = t3+2+2
#
({'created_at': t3 + '+2+2'},
{'created_at': t4, 'ctype_timestamp': t6, 'meta_timestamp': t8},
{'created_at': t4 + '+2+2',
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'neW_item'}),
({'created_at': t3 + '+2+2'},
{'created_at': t6, 'ctype_timestamp': t6, 'meta_timestamp': t8},
{'created_at': t6 + '+0+2',
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'neW_item'}),
({'created_at': t3 + '+2+2'},
{'created_at': t4, 'ctype_timestamp': t8, 'meta_timestamp': t8},
{'created_at': t4 + '+4+0',
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'neW_item'}),
({'created_at': t3 + '+2+2'},
{'created_at': t6, 'ctype_timestamp': t8, 'meta_timestamp': t8},
{'created_at': t6 + '+2+0',
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'neW_item'}),
({'created_at': t3 + '+2+2'},
{'created_at': t8, 'ctype_timestamp': t8, 'meta_timestamp': t8},
{'created_at': t8,
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'neW_item'}),
)
scenarios_when_some_new_item_wins = (
#
# some but not all new_item times > existing times -> mixed updates
#
# existing has attrs at single time
#
({'created_at': t3},
{'created_at': t3, 'ctype_timestamp': t3, 'meta_timestamp': t4},
{'created_at': t3 + '+0+1',
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3},
{'created_at': t3, 'ctype_timestamp': t4, 'meta_timestamp': t4},
{'created_at': t3 + '+1+0',
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'neW_item'}),
({'created_at': t3},
{'created_at': t3, 'ctype_timestamp': t4, 'meta_timestamp': t5},
{'created_at': t3 + '+1+1',
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'neW_item'}),
#
# existing has attrs at multiple times:
# data @ t3, ctype @ t5, meta @t7 -> existing created_at = t3+2+2
#
({'created_at': t3 + '+2+2'},
{'created_at': t3, 'ctype_timestamp': t3, 'meta_timestamp': t8},
{'created_at': t3 + '+2+3',
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'exIsting'}),
({'created_at': t3 + '+2+2'},
{'created_at': t3, 'ctype_timestamp': t6, 'meta_timestamp': t8},
{'created_at': t3 + '+3+2',
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'neW_item'}),
({'created_at': t3 + '+2+2'},
{'created_at': t4, 'ctype_timestamp': t4, 'meta_timestamp': t6},
{'created_at': t4 + '+1+2',
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'exIsting'}),
({'created_at': t3 + '+2+2'},
{'created_at': t4, 'ctype_timestamp': t6, 'meta_timestamp': t6},
{'created_at': t4 + '+2+1',
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'neW_item'}),
({'created_at': t3 + '+2+2'},
{'created_at': t4, 'ctype_timestamp': t4, 'meta_timestamp': t8},
{'created_at': t4 + '+1+3',
'etag': 'New_item', 'size': 'nEw_item', 'content_type': 'exIsting'}),
# this scenario is to check that the deltas are in hex
({'created_at': t3 + '+2+2'},
{'created_at': t2, 'ctype_timestamp': t20, 'meta_timestamp': t30},
{'created_at': t3 + '+11+a',
'etag': 'Existing', 'size': 'eXisting', 'content_type': 'neW_item'}),
)
def _test_scenario(self, scenario, newer):
existing_time, new_item_times, expected_attrs = scenario
# this is the existing record...
existing = None
if existing_time:
existing = dict(self.base_existing)
existing.update(existing_time)
# this is the new item to update
new_item = dict(self.base_new_item)
new_item.update(new_item_times)
# this is the expected result of the update
expected = dict(new_item)
expected.update(expected_attrs)
expected['data_timestamp'] = new_item['created_at']
try:
self.assertIs(newer,
update_new_item_from_existing(new_item, existing))
self.assertDictEqual(expected, new_item)
except AssertionError as e:
msg = ('Scenario: existing %s, new_item %s, expected %s.'
% scenario)
            msg = '%s Failed with: %s' % (msg, e)
raise AssertionError(msg)
def test_update_new_item_from_existing(self):
for scenario in self.scenarios_when_all_existing_wins:
self._test_scenario(scenario, False)
for scenario in self.scenarios_when_all_new_item_wins:
self._test_scenario(scenario, True)
for scenario in self.scenarios_when_some_new_item_wins:
self._test_scenario(scenario, True)
class TestModuleFunctions(unittest.TestCase):
def test_sift_shard_ranges(self):
ts_iter = make_timestamp_iter()
existing_shards = {}
sr1 = dict(ShardRange('a/o', next(ts_iter).internal))
sr2 = dict(ShardRange('a/o2', next(ts_iter).internal))
new_shard_ranges = [sr1, sr2]
# first empty existing shards will just add the shards
to_add, to_delete = sift_shard_ranges(new_shard_ranges,
existing_shards)
self.assertEqual(2, len(to_add))
self.assertIn(sr1, to_add)
self.assertIn(sr2, to_add)
self.assertFalse(to_delete)
# if there is a newer version in the existing shards then it won't be
# added to to_add
existing_shards['a/o'] = dict(
ShardRange('a/o', next(ts_iter).internal))
to_add, to_delete = sift_shard_ranges(new_shard_ranges,
existing_shards)
self.assertEqual([sr2], list(to_add))
self.assertFalse(to_delete)
# But if a newer version is in new_shard_ranges then the old will be
# added to to_delete and new is added to to_add.
sr1['timestamp'] = next(ts_iter).internal
to_add, to_delete = sift_shard_ranges(new_shard_ranges,
existing_shards)
self.assertEqual(2, len(to_add))
self.assertIn(sr1, to_add)
self.assertIn(sr2, to_add)
self.assertEqual({'a/o'}, to_delete)
| swift-master | test/unit/container/test_backend.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from textwrap import dedent
import mock
import errno
from swift.common.utils import Timestamp, readconf
from test.debug_logger import debug_logger
from swift.container import sync
from swift.common.db import DatabaseConnectionError
from swift.common import utils
from swift.common.wsgi import ConfigString
from swift.common.exceptions import ClientException
from swift.common.storage_policy import StoragePolicy
import test
from test.unit import patch_policies, with_tempdir
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b'endcap'
class FakeRing(object):
def __init__(self):
self.devs = [{'ip': '10.0.0.%s' % x, 'port': 1000 + x, 'device': 'sda'}
for x in range(3)]
def get_nodes(self, account, container=None, obj=None):
return 1, list(self.devs)
class FakeContainerBroker(object):
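    # Minimal stand-in for ContainerBroker: serves canned info/metadata/rows
    # and records the sync points that ContainerSync sets.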
def __init__(self, path, metadata=None, info=None, deleted=False,
items_since=None):
self.db_file = path
self.db_dir = os.path.dirname(path)
self.metadata = metadata if metadata else {}
self.info = info if info else {}
self.deleted = deleted
self.items_since = items_since if items_since else []
self.sync_point1 = -1
self.sync_point2 = -1
def get_max_row(self):
return 1
def get_info(self):
return self.info
def is_deleted(self):
return self.deleted
def get_items_since(self, sync_point, limit):
if sync_point < 0:
sync_point = 0
return self.items_since[sync_point:sync_point + limit]
def set_x_container_sync_points(self, sync_point1, sync_point2):
self.sync_point1 = sync_point1
self.sync_point2 = sync_point2
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestContainerSync(unittest.TestCase):
def setUp(self):
self.logger = debug_logger('test-container-sync')
def test_FileLikeIter(self):
# Retained test to show new FileLikeIter acts just like the removed
# _Iter2FileLikeObject did.
flo = sync.FileLikeIter(iter([b'123', b'4567', b'89', b'0']))
expect = b'1234567890'
got = flo.read(2)
self.assertTrue(len(got) <= 2)
self.assertEqual(got, expect[:len(got)])
expect = expect[len(got):]
got = flo.read(5)
self.assertTrue(len(got) <= 5)
self.assertEqual(got, expect[:len(got)])
expect = expect[len(got):]
self.assertEqual(flo.read(), expect)
self.assertEqual(flo.read(), b'')
self.assertEqual(flo.read(2), b'')
flo = sync.FileLikeIter(iter([b'123', b'4567', b'89', b'0']))
self.assertEqual(flo.read(), b'1234567890')
self.assertEqual(flo.read(), b'')
self.assertEqual(flo.read(2), b'')
def assertLogMessage(self, msg_level, expected, skip=0):
for line in self.logger.get_lines_for_level(msg_level)[skip:]:
msg = 'expected %r not in %r' % (expected, line)
self.assertTrue(expected in line, msg)
@with_tempdir
def test_init(self, tempdir):
ic_conf_path = os.path.join(tempdir, 'internal-client.conf')
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring)
self.assertTrue(cs.container_ring is cring)
        # specified but non-existent conf path: the daemon will not start
conf = {'internal_client_conf_path': ic_conf_path}
self.assertRaises(SystemExit, sync.ContainerSync, conf,
container_ring=cring, logger=self.logger)
        # not specified: the default internal client conf is used
with mock.patch('swift.container.sync.InternalClient') as mock_ic:
cs = sync.ContainerSync({}, container_ring=cring,
logger=self.logger)
self.assertTrue(cs.container_ring is cring)
self.assertTrue(mock_ic.called)
conf_path, name, retry = mock_ic.call_args[0]
self.assertTrue(isinstance(conf_path, ConfigString))
self.assertEqual(conf_path.contents.getvalue(),
dedent(sync.ic_conf_body))
self.assertLogMessage('warning', 'internal_client_conf_path')
self.assertLogMessage('warning', 'internal-client.conf-sample')
# correct
contents = dedent(sync.ic_conf_body)
with open(ic_conf_path, 'w') as f:
f.write(contents)
with mock.patch('swift.container.sync.InternalClient') as mock_ic:
cs = sync.ContainerSync(conf, container_ring=cring)
self.assertTrue(cs.container_ring is cring)
self.assertTrue(mock_ic.called)
conf_path, name, retry = mock_ic.call_args[0]
self.assertEqual(conf_path, ic_conf_path)
sample_conf_filename = os.path.join(
os.path.dirname(test.__file__),
'../etc/internal-client.conf-sample')
actual_conf = readconf(ConfigString(contents))
expected_conf = readconf(sample_conf_filename)
actual_conf.pop('__file__')
expected_conf.pop('__file__')
self.assertEqual(expected_conf, actual_conf)
def test_init_internal_client_log_name(self):
def _do_test_init_ic_log_name(conf, exp_internal_client_log_name):
with mock.patch(
'swift.container.sync.InternalClient') \
as mock_ic:
sync.ContainerSync(conf, container_ring='dummy object')
mock_ic.assert_called_once_with(
'conf-path',
'Swift Container Sync', 3,
global_conf={'log_name': exp_internal_client_log_name},
use_replication_network=True)
_do_test_init_ic_log_name({'internal_client_conf_path': 'conf-path'},
'container-sync-ic')
_do_test_init_ic_log_name({'internal_client_conf_path': 'conf-path',
'log_name': 'my-container-sync'},
'my-container-sync-ic')
def test_run_forever(self):
        # This runs run_forever with fakes to succeed for two loops: the first
        # causes a report but no interval sleep, the second no report but an
        # interval sleep.
time_calls = [0]
sleep_calls = []
def fake_time():
time_calls[0] += 1
returns = [1, # Initialized reported time
1, # Start time
3602, # Is it report time (yes)
3602, # Report time
3602, # Elapsed time for "under interval" (no)
3602, # Start time
3603, # Is it report time (no)
3603] # Elapsed time for "under interval" (yes)
if time_calls[0] == len(returns) + 1:
raise Exception('we are now done')
return returns[time_calls[0] - 1]
def fake_sleep(amount):
sleep_calls.append(amount)
gen_func = ('swift.container.sync_store.'
'ContainerSyncStore.synced_containers_generator')
with mock.patch('swift.container.sync.InternalClient'), \
mock.patch('swift.container.sync.time', fake_time), \
mock.patch('swift.container.sync.sleep', fake_sleep), \
mock.patch(gen_func) as fake_generator, \
mock.patch('swift.container.sync.ContainerBroker',
lambda p, logger: FakeContainerBroker(p, info={
'account': 'a', 'container': 'c',
'storage_policy_index': 0})):
fake_generator.side_effect = [iter(['container.db']),
iter(['container.db'])]
cs = sync.ContainerSync({}, container_ring=FakeRing())
try:
cs.run_forever()
except Exception as err:
if str(err) != 'we are now done':
raise
self.assertEqual(time_calls, [9])
self.assertEqual(len(sleep_calls), 2)
self.assertLessEqual(sleep_calls[0], cs.interval)
self.assertEqual(cs.interval - 1, sleep_calls[1])
self.assertEqual(2, fake_generator.call_count)
self.assertEqual(cs.reported, 3602)
def test_run_once(self):
        # This runs run_once with fakes twice: the first causes an interim
        # report, the second no interim report.
time_calls = [0]
def fake_time():
time_calls[0] += 1
returns = [1, # Initialized reported time
1, # Start time
3602, # Is it report time (yes)
3602, # Report time
3602, # End report time
3602, # For elapsed
3602, # Start time
3603, # Is it report time (no)
3604, # End report time
3605] # For elapsed
if time_calls[0] == len(returns) + 1:
raise Exception('we are now done')
return returns[time_calls[0] - 1]
gen_func = ('swift.container.sync_store.'
'ContainerSyncStore.synced_containers_generator')
with mock.patch('swift.container.sync.InternalClient'), \
mock.patch('swift.container.sync.time', fake_time), \
mock.patch(gen_func) as fake_generator, \
mock.patch('swift.container.sync.ContainerBroker',
lambda p, logger: FakeContainerBroker(p, info={
'account': 'a', 'container': 'c',
'storage_policy_index': 0})):
fake_generator.side_effect = [iter(['container.db']),
iter(['container.db'])]
cs = sync.ContainerSync({}, container_ring=FakeRing())
try:
cs.run_once()
self.assertEqual(time_calls, [6])
self.assertEqual(1, fake_generator.call_count)
self.assertEqual(cs.reported, 3602)
cs.run_once()
except Exception as err:
if str(err) != 'we are now done':
raise
self.assertEqual(time_calls, [10])
self.assertEqual(2, fake_generator.call_count)
self.assertEqual(cs.reported, 3604)
def test_container_sync_not_db(self):
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring)
self.assertEqual(cs.container_failures, 0)
def test_container_sync_missing_db(self):
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring)
broker = 'swift.container.backend.ContainerBroker'
store = 'swift.container.sync_store.ContainerSyncStore'
# In this test we call the container_sync instance several
# times with a missing db in various combinations.
# Since we use the same ContainerSync instance for all tests
# its failures counter increases by one with each call.
# Test the case where get_info returns DatabaseConnectionError
# with DB does not exist, and we succeed in deleting it.
with mock.patch(broker + '.get_info') as fake_get_info:
with mock.patch(store + '.remove_synced_container') as fake_remove:
fake_get_info.side_effect = DatabaseConnectionError(
'a',
"DB doesn't exist")
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(1, fake_remove.call_count)
self.assertEqual('isa.db', fake_remove.call_args[0][0].db_file)
# Test the case where get_info returns DatabaseConnectionError
# with DB does not exist, and we fail to delete it.
with mock.patch(broker + '.get_info') as fake_get_info:
with mock.patch(store + '.remove_synced_container') as fake_remove:
fake_get_info.side_effect = DatabaseConnectionError(
'a',
"DB doesn't exist")
fake_remove.side_effect = OSError('1')
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 2)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(1, fake_remove.call_count)
self.assertEqual('isa.db', fake_remove.call_args[0][0].db_file)
# Test the case where get_info returns DatabaseConnectionError
# with DB does not exist, and it returns an error != ENOENT.
with mock.patch(broker + '.get_info') as fake_get_info:
with mock.patch(store + '.remove_synced_container') as fake_remove:
fake_get_info.side_effect = DatabaseConnectionError(
'a',
"DB doesn't exist")
fake_remove.side_effect = OSError(errno.EPERM, 'a')
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 3)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(1, fake_remove.call_count)
self.assertEqual('isa.db', fake_remove.call_args[0][0].db_file)
# Test the case where get_info returns DatabaseConnectionError
# error different than DB does not exist
with mock.patch(broker + '.get_info') as fake_get_info:
with mock.patch(store + '.remove_synced_container') as fake_remove:
fake_get_info.side_effect = DatabaseConnectionError('a', 'a')
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 4)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(0, fake_remove.call_count)
def test_container_sync_not_my_db(self):
# Db could be there due to handoff replication so test that we ignore
# those.
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({
'bind_ip': '10.0.0.0',
}, container_ring=cring)
# Plumbing test for bind_ip and whataremyips()
self.assertEqual(['10.0.0.0'], cs._myips)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p, logger: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0})
cs._myips = ['127.0.0.1'] # No match
cs._myport = 1 # No match
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 0)
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1 # No match
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 0)
cs._myips = ['127.0.0.1'] # No match
cs._myport = 1000 # Match
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 0)
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will cause the 1 container failure since the
# broker's info doesn't contain sync point keys
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
finally:
sync.ContainerBroker = orig_ContainerBroker
def test_container_sync_deleted(self):
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p, logger: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0}, deleted=False)
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will cause the 1 container failure since the
# broker's info doesn't contain sync point keys
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
sync.ContainerBroker = lambda p, logger: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0}, deleted=True)
# This complete match will not cause any more container failures
# since the broker indicates deletion
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
finally:
sync.ContainerBroker = orig_ContainerBroker
def test_container_sync_no_to_or_key(self):
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p, logger: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will be skipped since the broker's metadata
# has no x-container-sync-to or x-container-sync-key
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 0)
self.assertEqual(cs.container_skips, 1)
sync.ContainerBroker = lambda p, logger: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will be skipped since the broker's metadata
# has no x-container-sync-key
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 0)
self.assertEqual(cs.container_skips, 2)
sync.ContainerBroker = lambda p, logger: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-key': ('key', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will be skipped since the broker's metadata
# has no x-container-sync-to
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 0)
self.assertEqual(cs.container_skips, 3)
sync.ContainerBroker = lambda p, logger: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = []
# This complete match will cause a container failure since the
# sync-to won't validate as allowed.
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 3)
sync.ContainerBroker = lambda p, logger: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
# This complete match will succeed completely since the broker
# get_items_since will return no new rows.
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 3)
finally:
sync.ContainerBroker = orig_ContainerBroker
def test_container_stop_at(self):
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring)
orig_ContainerBroker = sync.ContainerBroker
orig_time = sync.time
try:
sync.ContainerBroker = lambda p, logger: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=['erroneous data'])
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
# This sync will fail since the items_since data is bad.
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 0)
# Set up fake times to make the sync short-circuit as having taken
# too long
fake_times = [
1.0, # Compute the time to move on
100000.0, # Compute if it's time to move on from first loop
100000.0] # Compute if it's time to move on from second loop
def fake_time():
return fake_times.pop(0)
sync.time = fake_time
# This same sync won't fail since it will look like it took so long
# as to be time to move on (before it ever actually tries to do
# anything).
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 0)
finally:
sync.ContainerBroker = orig_ContainerBroker
sync.time = orig_time
def test_container_first_loop(self):
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring)
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that no rows match for full syncing, ordinal is 0 and
# all hashes are 0
return '\x00' * 16
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
with mock.patch('swift.container.sync.ContainerBroker',
lambda p, logger: fcb), \
mock.patch('swift.container.sync.hash_path', fake_hash_path):
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because no rows match
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 0)
self.assertIsNone(fcb.sync_point1)
self.assertEqual(fcb.sync_point2, -1)
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that all rows match for full syncing, ordinal is 0
# and all hashes are 1
return '\x01' * 16
fcb = FakeContainerBroker('path', info={'account': 'a',
'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': 1,
'x_container_sync_point2': 1},
metadata={'x-container-sync-to':
('http://127.0.0.1/a/c', 1),
'x-container-sync-key':
('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
with mock.patch('swift.container.sync.ContainerBroker',
lambda p, logger: fcb), \
mock.patch('swift.container.sync.hash_path', fake_hash_path):
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because the two sync points haven't deviated yet
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(fcb.sync_point1, -1)
self.assertEqual(fcb.sync_point2, -1)
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
with mock.patch('swift.container.sync.ContainerBroker',
lambda p, logger: fcb):
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Fails because container_sync_row will fail since the row has no
# 'deleted' key
self.assertEqual(cs.container_failures, 2)
self.assertEqual(cs.container_skips, 0)
self.assertIsNone(fcb.sync_point1)
self.assertEqual(fcb.sync_point2, -1)
def fake_delete_object(*args, **kwargs):
raise ClientException
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
'deleted': True}])
with mock.patch('swift.container.sync.ContainerBroker',
lambda p, logger: fcb), \
mock.patch('swift.container.sync.delete_object',
fake_delete_object):
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Fails because delete_object fails
self.assertEqual(cs.container_failures, 3)
self.assertEqual(cs.container_skips, 0)
self.assertIsNone(fcb.sync_point1)
self.assertEqual(fcb.sync_point2, -1)
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
'deleted': True}])
with mock.patch('swift.container.sync.ContainerBroker',
lambda p, logger: fcb), \
mock.patch('swift.container.sync.delete_object',
lambda *x, **y: None):
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because delete_object succeeds
self.assertEqual(cs.container_failures, 3)
self.assertEqual(cs.container_skips, 0)
self.assertIsNone(fcb.sync_point1)
self.assertEqual(fcb.sync_point2, 1)
def test_container_second_loop(self):
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring,
logger=self.logger)
orig_ContainerBroker = sync.ContainerBroker
orig_hash_path = sync.hash_path
orig_delete_object = sync.delete_object
try:
# We'll ensure the first loop is always skipped by keeping the two
# sync points equal
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that no rows match for second loop, ordinal is 0 and
# all hashes are 1
return b'\x01' * 16
sync.hash_path = fake_hash_path
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p, logger: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because no rows match
self.assertEqual(cs.container_failures, 0)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(fcb.sync_point1, 1)
self.assertIsNone(fcb.sync_point2)
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that all rows match for second loop, ordinal is 0 and
# all hashes are 0
return b'\x00' * 16
def fake_delete_object(*args, **kwargs):
pass
sync.hash_path = fake_hash_path
sync.delete_object = fake_delete_object
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p, logger: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Fails because row is missing 'deleted' key
# Nevertheless the fault is skipped
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(fcb.sync_point1, 1)
self.assertIsNone(fcb.sync_point2)
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
'deleted': True}])
sync.ContainerBroker = lambda p, logger: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because row now has 'deleted' key and delete_object
# succeeds
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(fcb.sync_point1, 1)
self.assertIsNone(fcb.sync_point2)
finally:
sync.ContainerBroker = orig_ContainerBroker
sync.hash_path = orig_hash_path
sync.delete_object = orig_delete_object
def test_container_report(self):
container_stats = {'puts': 0,
'deletes': 0,
'bytes': 0}
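        # replace real row syncing with a simple tally of puts/deletes/bytes
        # so the report log line can be checked without any remote requests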
def fake_container_sync_row(self, row, sync_to,
user_key, broker, info, realm, realm_key):
if 'deleted' in row:
container_stats['deletes'] += 1
return True
container_stats['puts'] += 1
container_stats['bytes'] += row['size']
return True
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that no rows match for second loop, ordinal is 0 and
# all hashes are 1
return '\x01' * 16
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': 5,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o1', 'size': 0,
'deleted': True},
{'ROWID': 2, 'name': 'o2', 'size': 1010},
{'ROWID': 3, 'name': 'o3', 'size': 0,
'deleted': True},
{'ROWID': 4, 'name': 'o4', 'size': 90},
{'ROWID': 5, 'name': 'o5', 'size': 0}])
with mock.patch('swift.container.sync.InternalClient'), \
mock.patch('swift.container.sync.hash_path',
fake_hash_path), \
mock.patch('swift.container.sync.ContainerBroker',
lambda p, logger: fcb):
cring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring,
logger=self.logger)
cs.container_stats = container_stats
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
with mock.patch.object(cs, 'container_sync_row',
fake_container_sync_row):
cs.container_sync('isa.db')
# Succeeds because no rows match
log_line = cs.logger.get_lines_for_level('info')[0]
lines = log_line.split(',')
self.assertEqual('total_rows: 1', lines.pop().strip())
self.assertEqual('sync_point2: None', lines.pop().strip())
self.assertEqual('sync_point1: 5', lines.pop().strip())
self.assertEqual('bytes: 0', lines.pop().strip())
self.assertEqual('deletes: 0', lines.pop().strip())
self.assertEqual('posts: 0', lines.pop().strip())
self.assertEqual('puts: 0', lines.pop().strip())
def test_container_sync_row_delete(self):
self._test_container_sync_row_delete(None, None)
def test_container_sync_row_delete_using_realms(self):
self._test_container_sync_row_delete('US', 'realm_key')
def _test_container_sync_row_delete(self, realm, realm_key):
orig_uuid = sync.uuid
orig_delete_object = sync.delete_object
try:
class FakeUUID(object):
class uuid4(object):
hex = 'abcdef'
sync.uuid = FakeUUID
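            # uuid4 is faked so the x-container-sync-auth nonce is always
            # 'abcdef', which keeps the expected signature below deterministic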
ts_data = Timestamp(1.1)
def fake_delete_object(path, name=None, headers=None, proxy=None,
logger=None, timeout=None):
self.assertEqual(path, 'http://sync/to/path')
self.assertEqual(name, 'object')
if realm:
self.assertEqual(headers, {
'x-container-sync-auth':
'US abcdef a2401ecb1256f469494a0abcb0eb62ffa73eca63',
'x-timestamp': ts_data.internal})
else:
self.assertEqual(
headers,
{'x-container-sync-key': 'key',
'x-timestamp': ts_data.internal})
self.assertEqual(proxy, 'http://proxy')
self.assertEqual(timeout, 5.0)
self.assertEqual(logger, self.logger)
sync.delete_object = fake_delete_object
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=FakeRing(),
logger=self.logger)
cs.http_proxies = ['http://proxy']
# Success.
# simulate a row with tombstone at 1.1 and later ctype, meta times
created_at = ts_data.internal + '+1388+1388' # last modified = 1.2
self.assertTrue(cs.container_sync_row(
{'deleted': True,
'name': 'object',
'created_at': created_at,
'size': '1000'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_deletes, 1)
exc = []
def fake_delete_object(*args, **kwargs):
exc.append(Exception('test exception'))
raise exc[-1]
sync.delete_object = fake_delete_object
# Failure because of delete_object exception
self.assertFalse(cs.container_sync_row(
{'deleted': True,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_deletes, 1)
self.assertEqual(len(exc), 1)
self.assertEqual(str(exc[-1]), 'test exception')
def fake_delete_object(*args, **kwargs):
exc.append(ClientException('test client exception'))
raise exc[-1]
sync.delete_object = fake_delete_object
# Failure because of delete_object exception
self.assertFalse(cs.container_sync_row(
{'deleted': True,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_deletes, 1)
self.assertEqual(len(exc), 2)
self.assertEqual(str(exc[-1]), 'test client exception')
def fake_delete_object(*args, **kwargs):
exc.append(ClientException('test client exception',
http_status=404))
raise exc[-1]
sync.delete_object = fake_delete_object
# Success because the object wasn't even found
self.assertTrue(cs.container_sync_row(
{'deleted': True,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_deletes, 2)
self.assertEqual(len(exc), 3)
self.assertEqual(str(exc[-1]), 'test client exception: 404')
def fake_delete_object(*args, **kwargs):
exc.append(ClientException('test client exception',
http_status=409))
raise exc[-1]
sync.delete_object = fake_delete_object
# Success because our tombstone is out of date
self.assertTrue(cs.container_sync_row(
{'deleted': True,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_deletes, 3)
self.assertEqual(len(exc), 4)
self.assertEqual(str(exc[-1]), 'test client exception: 409')
finally:
sync.uuid = orig_uuid
sync.delete_object = orig_delete_object
def test_container_sync_row_put(self):
self._test_container_sync_row_put(None, None)
def test_container_sync_row_put_using_realms(self):
self._test_container_sync_row_put('US', 'realm_key')
def _test_container_sync_row_put(self, realm, realm_key):
orig_uuid = sync.uuid
orig_put_object = sync.put_object
orig_head_object = sync.head_object
try:
class FakeUUID(object):
class uuid4(object):
hex = 'abcdef'
sync.uuid = FakeUUID
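            # as in the delete test, fake uuid4 so the sync-auth nonce (and
            # hence the expected signature) stays deterministic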
ts_data = Timestamp(1.1)
timestamp = Timestamp(1.2)
put_object_calls = []
def fake_put_object(*args, **kwargs):
put_object_calls.append((args, kwargs))
def check_put_object(extra_headers, sync_to, name=None,
headers=None, contents=None, proxy=None,
logger=None, timeout=None):
self.assertEqual(sync_to, 'http://sync/to/path')
self.assertEqual(name, 'object')
expected_headers = {
'x-timestamp': timestamp.internal,
'etag': 'etagvalue',
'other-header': 'other header value',
'content-type': 'text/plain'}
if realm:
expected_headers.update({
'x-container-sync-auth':
'US abcdef a5fb3cf950738e6e3b364190e246bd7dd21dad3c'})
else:
expected_headers.update({
'x-container-sync-key': 'key'})
expected_headers.update(extra_headers)
self.assertDictEqual(expected_headers, headers)
self.assertEqual(contents.read(), b'contents')
self.assertEqual(proxy, 'http://proxy')
self.assertEqual(timeout, 5.0)
self.assertEqual(logger, self.logger)
sync.put_object = fake_put_object
expected_put_count = 0
excepted_failure_count = 0
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=FakeRing(),
logger=self.logger)
cs.http_proxies = ['http://proxy']
def fake_get_object(acct, con, obj, headers, acceptable_statuses,
params=None):
self.assertEqual({'symlink': 'get'}, params)
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
'0')
return (200,
{'other-header': 'other header value',
'etag': '"etagvalue"',
'x-timestamp': timestamp.internal,
'content-type': 'text/plain; swift_bytes=123'},
iter([b'contents']))
cs.swift.get_object = fake_get_object
# Success as everything says it worked.
# simulate a row with data at 1.1 and later ctype, meta times
created_at = ts_data.internal + '+1388+1388' # last modified = 1.2
def fake_object_in_rcontainer(row, sync_to, user_key,
broker, realm, realm_key):
return False
orig_object_in_rcontainer = cs._object_in_remote_container
cs._object_in_remote_container = fake_object_in_rcontainer
self.assertTrue(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': created_at,
'size': 50}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(1, len(put_object_calls))
check_put_object({'etag': 'etagvalue'},
*put_object_calls[0][0], **put_object_calls[0][1])
expected_put_count += 1
self.assertEqual(cs.container_puts, expected_put_count)
def fake_get_object(acct, con, obj, headers, acceptable_statuses,
params=None):
self.assertEqual({'symlink': 'get'}, params)
self.assertEqual(headers['X-Newest'], True)
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
'0')
return (200,
{'date': 'date value',
'last-modified': 'last modified value',
'x-timestamp': timestamp.internal,
'other-header': 'other header value',
'etag': '"etagvalue"',
'content-type': 'text/plain; swift_bytes=123'},
iter([b'contents']))
cs.swift.get_object = fake_get_object
# Success as everything says it worked, also checks 'date' and
# 'last-modified' headers are removed and that 'etag' header is
# stripped of double quotes.
put_object_calls = []
self.assertTrue(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': timestamp.internal,
'size': 60}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(1, len(put_object_calls))
check_put_object({'etag': 'etagvalue'},
*put_object_calls[0][0], **put_object_calls[0][1])
expected_put_count += 1
self.assertEqual(cs.container_puts, expected_put_count)
# Success as everything says it worked, also check that PUT
# timestamp equals GET timestamp when it is newer than created_at
# value.
put_object_calls = []
self.assertTrue(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.1',
'size': 60}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(1, len(put_object_calls))
check_put_object({'etag': 'etagvalue'},
*put_object_calls[0][0], **put_object_calls[0][1])
expected_put_count += 1
self.assertEqual(cs.container_puts, expected_put_count)
def fake_get_object(acct, con, obj, headers, acceptable_statuses,
params=None):
self.assertEqual({'symlink': 'get'}, params)
self.assertEqual(headers['X-Newest'], True)
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
'0')
return (200,
{'date': 'date value',
'last-modified': 'last modified value',
'x-timestamp': timestamp.internal,
'other-header': 'other header value',
'etag': '"etagvalue"',
'x-static-large-object': 'true',
'content-type': 'text/plain; swift_bytes=123'},
iter([b'contents']))
cs.swift.get_object = fake_get_object
            # Success as everything says it worked; also check that the etag
            # header is removed in the case of an SLO
put_object_calls = []
self.assertTrue(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.1',
'size': 60}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(1, len(put_object_calls))
check_put_object({'x-static-large-object': 'true'},
*put_object_calls[0][0], **put_object_calls[0][1])
expected_put_count += 1
self.assertEqual(cs.container_puts, expected_put_count)
exc = []
def fake_get_object(acct, con, obj, headers, acceptable_statuses,
params=None):
self.assertEqual({'symlink': 'get'}, params)
self.assertEqual(headers['X-Newest'], True)
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
'0')
exc.append(Exception('test exception'))
raise exc[-1]
cs.swift.get_object = fake_get_object
# Fail due to completely unexpected exception
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': timestamp.internal,
'size': 70}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_puts, expected_put_count)
excepted_failure_count += 1
self.assertEqual(len(exc), 1)
self.assertEqual(str(exc[-1]), 'test exception')
exc = []
def fake_get_object(acct, con, obj, headers, acceptable_statuses,
params=None):
self.assertEqual({'symlink': 'get'}, params)
self.assertEqual(headers['X-Newest'], True)
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
'0')
exc.append(ClientException('test client exception'))
raise exc[-1]
cs.swift.get_object = fake_get_object
# Fail due to all direct_get_object calls failing
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': timestamp.internal,
'size': 80}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_puts, expected_put_count)
excepted_failure_count += 1
self.assertEqual(len(exc), 1)
self.assertEqual(str(exc[-1]), 'test client exception')
def fake_get_object(acct, con, obj, headers, acceptable_statuses,
params=None):
self.assertEqual({'symlink': 'get'}, params)
self.assertEqual(headers['X-Newest'], True)
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
'0')
return (200, {'other-header': 'other header value',
'x-timestamp': timestamp.internal,
'etag': '"etagvalue"'},
iter([b'contents']))
def fake_put_object(*args, **kwargs):
raise ClientException('test client exception', http_status=401)
cs.swift.get_object = fake_get_object
sync.put_object = fake_put_object
# Fail due to 401
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': timestamp.internal,
'size': 90}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_puts, expected_put_count)
excepted_failure_count += 1
self.assertEqual(cs.container_failures, excepted_failure_count)
self.assertLogMessage('info', 'Unauth')
def fake_put_object(*args, **kwargs):
raise ClientException('test client exception', http_status=404)
sync.put_object = fake_put_object
# Fail due to 404
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': timestamp.internal,
'size': 50}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_puts, expected_put_count)
excepted_failure_count += 1
self.assertEqual(cs.container_failures, excepted_failure_count)
self.assertLogMessage('info', 'Not found', 1)
def fake_put_object(*args, **kwargs):
raise ClientException('test client exception', http_status=503)
sync.put_object = fake_put_object
# Fail due to 503
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': timestamp.internal,
'size': 50}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_puts, expected_put_count)
excepted_failure_count += 1
self.assertEqual(cs.container_failures, excepted_failure_count)
self.assertLogMessage('error', 'ERROR Syncing')
# Test the following cases:
# remote has the same date and a put doesn't take place
# remote has more up to date copy and a put doesn't take place
# head_object returns ClientException(404) and a put takes place
# head_object returns other ClientException put doesn't take place
# and we get failure
# head_object returns other Exception put does not take place
# and we get failure
# remote returns old copy and a put takes place
test_row = {'deleted': False,
'name': 'object',
'created_at': timestamp.internal,
'etag': '1111',
'size': 10}
test_info = {'account': 'a',
'container': 'c',
'storage_policy_index': 0}
actual_puts = []
def fake_put_object(*args, **kwargs):
actual_puts.append((args, kwargs))
def fake_head_object(*args, **kwargs):
return ({'x-timestamp': '1.2'}, '')
sync.put_object = fake_put_object
sync.head_object = fake_head_object
cs._object_in_remote_container = orig_object_in_rcontainer
self.assertTrue(cs.container_sync_row(
test_row, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
test_info,
realm, realm_key))
# No additional put has taken place
self.assertEqual(len(actual_puts), 0)
# No additional errors
self.assertEqual(cs.container_failures, excepted_failure_count)
def fake_head_object(*args, **kwargs):
return ({'x-timestamp': '1.3'}, '')
sync.head_object = fake_head_object
self.assertTrue(cs.container_sync_row(
test_row, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
test_info,
realm, realm_key))
# No additional put has taken place
self.assertEqual(len(actual_puts), 0)
# No additional errors
self.assertEqual(cs.container_failures, excepted_failure_count)
actual_puts = []
def fake_head_object(*args, **kwargs):
raise ClientException('test client exception', http_status=404)
sync.head_object = fake_head_object
self.assertTrue(cs.container_sync_row(
test_row, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
test_info, realm, realm_key))
# Additional put has taken place
self.assertEqual(len(actual_puts), 1)
# No additional errors
self.assertEqual(cs.container_failures, excepted_failure_count)
def fake_head_object(*args, **kwargs):
raise ClientException('test client exception', http_status=401)
sync.head_object = fake_head_object
self.assertFalse(cs.container_sync_row(
test_row, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
test_info, realm, realm_key))
# No additional put has taken place, failures increased
self.assertEqual(len(actual_puts), 1)
excepted_failure_count += 1
self.assertEqual(cs.container_failures, excepted_failure_count)
def fake_head_object(*args, **kwargs):
raise Exception()
sync.head_object = fake_head_object
self.assertFalse(cs.container_sync_row(
test_row,
'http://sync/to/path',
'key', FakeContainerBroker('broker'),
test_info, realm, realm_key))
# No additional put has taken place, failures increased
self.assertEqual(len(actual_puts), 1)
excepted_failure_count += 1
self.assertEqual(cs.container_failures, excepted_failure_count)
def fake_head_object(*args, **kwargs):
return ({'x-timestamp': '1.1'}, '')
sync.head_object = fake_head_object
self.assertTrue(cs.container_sync_row(
test_row, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
test_info, realm, realm_key))
# Additional put has taken place
self.assertEqual(len(actual_puts), 2)
# No additional errors
self.assertEqual(cs.container_failures, excepted_failure_count)
finally:
sync.uuid = orig_uuid
sync.put_object = orig_put_object
sync.head_object = orig_head_object
def test_select_http_proxy_None(self):
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync(
{'sync_proxy': ''}, container_ring=FakeRing())
self.assertIsNone(cs.select_http_proxy())
def test_select_http_proxy_one(self):
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync(
{'sync_proxy': 'http://one'}, container_ring=FakeRing())
self.assertEqual(cs.select_http_proxy(), 'http://one')
def test_select_http_proxy_multiple(self):
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync(
{'sync_proxy': 'http://one,http://two,http://three'},
container_ring=FakeRing())
self.assertEqual(
set(cs.http_proxies),
set(['http://one', 'http://two', 'http://three']))
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/container/test_sync.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import six.moves.cPickle as pickle
import mock
import os
import unittest
from contextlib import closing
from gzip import GzipFile
from shutil import rmtree
from tempfile import mkdtemp
from test.debug_logger import debug_logger
from test.unit import mock_check_drive
from eventlet import spawn, Timeout
from swift.common import exceptions, utils
from swift.container import updater as container_updater
from swift.container.backend import ContainerBroker, DATADIR
from swift.common.ring import RingData
from swift.common.utils import normalize_timestamp
from test import listen_zero
class TestContainerUpdater(unittest.TestCase):
def setUp(self):
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b'startcap'
self.testdir = os.path.join(mkdtemp(), 'tmp_test_container_updater')
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
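        # Build a two-replica account ring whose devices point at the local
        # replication address; tests override replication_port as needed.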
ring_file = os.path.join(self.testdir, 'account.ring.gz')
with closing(GzipFile(ring_file, 'wb')) as f:
pickle.dump(
RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'ip': '127.0.0.2', 'port': 12345,
'replication_ip': '127.0.0.1',
# replication_port may be overridden in tests but
# include here for completeness...
'replication_port': 67890,
'device': 'sda1', 'zone': 0},
{'id': 1, 'ip': '127.0.0.2', 'port': 12345,
'replication_ip': '127.0.0.1',
'replication_port': 67890,
'device': 'sda1', 'zone': 2}], 30),
f)
self.devices_dir = os.path.join(self.testdir, 'devices')
os.mkdir(self.devices_dir)
self.sda1 = os.path.join(self.devices_dir, 'sda1')
os.mkdir(self.sda1)
self.logger = debug_logger('test')
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
def _get_container_updater(self, conf_updates=None):
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'node_timeout': '15',
'account_suppression_time': 0
}
if conf_updates:
conf.update(conf_updates)
return container_updater.ContainerUpdater(conf, logger=self.logger)
def test_creation(self):
cu = self._get_container_updater({'concurrency': '2',
'node_timeout': '5.5'})
self.assertTrue(hasattr(cu, 'logger'))
self.assertTrue(cu.logger is not None)
self.assertEqual(cu.devices, self.devices_dir)
self.assertEqual(cu.interval, 1)
self.assertEqual(cu.concurrency, 2)
self.assertEqual(cu.node_timeout, 5.5)
self.assertEqual(cu.account_suppression_time, 0)
self.assertTrue(cu.get_account_ring() is not None)
def test_conf_params(self):
# defaults
daemon = container_updater.ContainerUpdater({})
self.assertEqual(daemon.devices, '/srv/node')
self.assertEqual(daemon.mount_check, True)
self.assertEqual(daemon.swift_dir, '/etc/swift')
self.assertEqual(daemon.interval, 300)
self.assertEqual(daemon.concurrency, 4)
self.assertEqual(daemon.max_containers_per_second, 50.0)
# non-defaults
conf = {
'devices': '/some/where/else',
'mount_check': 'huh?',
'swift_dir': '/not/here',
'interval': '600.1',
'concurrency': '2',
'containers_per_second': '10.5',
}
daemon = container_updater.ContainerUpdater(conf)
self.assertEqual(daemon.devices, '/some/where/else')
self.assertEqual(daemon.mount_check, False)
self.assertEqual(daemon.swift_dir, '/not/here')
self.assertEqual(daemon.interval, 600.1)
self.assertEqual(daemon.concurrency, 2)
self.assertEqual(daemon.max_containers_per_second, 10.5)
# check deprecated option
daemon = container_updater.ContainerUpdater({'slowdown': '0.04'})
self.assertEqual(daemon.max_containers_per_second, 20.0)
def check_bad(conf):
with self.assertRaises(ValueError):
container_updater.ContainerUpdater(conf)
check_bad({'interval': 'foo'})
check_bad({'concurrency': 'bar'})
check_bad({'concurrency': '1.0'})
check_bad({'slowdown': 'baz'})
check_bad({'containers_per_second': 'quux'})
@mock.patch.object(container_updater.ContainerUpdater, 'container_sweep')
def test_run_once_with_device_unmounted(self, mock_sweep):
cu = self._get_container_updater()
containers_dir = os.path.join(self.sda1, DATADIR)
os.mkdir(containers_dir)
partition_dir = os.path.join(containers_dir, "a")
os.mkdir(partition_dir)
cu.run_once()
self.assertTrue(os.path.exists(containers_dir)) # sanity check
# only called if a partition dir exists
self.assertTrue(mock_sweep.called)
mock_sweep.reset_mock()
cu = self._get_container_updater({'mount_check': 'true'})
with mock_check_drive():
cu.run_once()
log_lines = self.logger.get_lines_for_level('warning')
self.assertGreater(len(log_lines), 0)
msg = '%s is not mounted' % self.sda1
self.assertEqual(log_lines[0], msg)
# Ensure that the container_sweep did not run
self.assertFalse(mock_sweep.called)
@mock.patch('swift.container.updater.dump_recon_cache')
def test_run_once_with_get_info_timeout(self, mock_dump_recon):
cu = self._get_container_updater()
containers_dir = os.path.join(self.sda1, DATADIR)
os.mkdir(containers_dir)
subdir = os.path.join(containers_dir, 'subdir')
os.mkdir(subdir)
db_file = os.path.join(subdir, 'hash.db')
cb = ContainerBroker(db_file, account='a', container='c')
cb.initialize(normalize_timestamp(1), 0)
timeout = exceptions.LockTimeout(10, db_file)
timeout.cancel()
with mock.patch('swift.container.updater.ContainerBroker.get_info',
side_effect=timeout):
cu.run_once()
log_lines = self.logger.get_lines_for_level('info')
self.assertIn('Failed to get container info (Lock timeout: '
'10 seconds: %s); skipping.' % db_file, log_lines)
@mock.patch('swift.container.updater.dump_recon_cache')
@mock.patch('swift.container.updater.ContainerUpdater.process_container',
side_effect=Exception('Boom!'))
def test_error_in_process(self, mock_process, mock_dump_recon):
cu = self._get_container_updater()
containers_dir = os.path.join(self.sda1, DATADIR)
os.mkdir(containers_dir)
subdir = os.path.join(containers_dir, 'subdir')
os.mkdir(subdir)
cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
container='c', pending_timeout=1)
cb.initialize(normalize_timestamp(1), 0)
cu.run_once()
log_lines = self.logger.get_lines_for_level('error')
self.assertTrue(log_lines)
self.assertIn('Error processing container ', log_lines[0])
self.assertIn('devices/sda1/containers/subdir/hash.db', log_lines[0])
self.assertIn('Boom!', log_lines[0])
self.assertFalse(log_lines[1:])
self.assertEqual(1, len(mock_dump_recon.mock_calls))
@mock.patch('swift.container.updater.dump_recon_cache')
def test_run_once(self, mock_recon):
cu = self._get_container_updater()
cu.run_once()
containers_dir = os.path.join(self.sda1, DATADIR)
os.mkdir(containers_dir)
cu.run_once()
self.assertTrue(os.path.exists(containers_dir))
subdir = os.path.join(containers_dir, 'subdir')
os.mkdir(subdir)
cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
container='c')
cb.initialize(normalize_timestamp(1), 0)
self.assertTrue(cb.is_root_container())
cu.run_once()
info = cb.get_info()
self.assertEqual(info['object_count'], 0)
self.assertEqual(info['bytes_used'], 0)
self.assertEqual(info['reported_object_count'], 0)
self.assertEqual(info['reported_bytes_used'], 0)
cb.put_object('o', normalize_timestamp(2), 3, 'text/plain',
'68b329da9893e34099c7d8ad5cb9c940')
cu.run_once()
info = cb.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 3)
self.assertEqual(info['reported_object_count'], 0)
self.assertEqual(info['reported_bytes_used'], 0)
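        # accept() acts as a minimal account server: it returns the given
        # status and checks the container update PUT request line and headers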
def accept(sock, addr, return_code):
try:
with Timeout(3):
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write(b'HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
return_code)
out.flush()
self.assertEqual(inc.readline(),
b'PUT /sda1/0/a/c HTTP/1.1\r\n')
headers = {}
line = inc.readline()
while line and line != b'\r\n':
headers[line.split(b':')[0].lower()] = \
line.split(b':')[1].strip()
line = inc.readline()
self.assertIn(b'x-put-timestamp', headers)
self.assertIn(b'x-delete-timestamp', headers)
self.assertIn(b'x-object-count', headers)
self.assertIn(b'x-bytes-used', headers)
except BaseException as err:
import traceback
traceback.print_exc()
return err
return None
bindsock = listen_zero()
def spawn_accepts():
events = []
for _junk in range(2):
sock, addr = bindsock.accept()
events.append(spawn(accept, sock, addr, 201))
return events
spawned = spawn(spawn_accepts)
for dev in cu.get_account_ring().devs:
if dev is not None:
dev['replication_port'] = bindsock.getsockname()[1]
cu.run_once()
with Timeout(5):
for event in spawned.wait():
err = event.wait()
if err:
raise err
info = cb.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 3)
self.assertEqual(info['reported_object_count'], 1)
self.assertEqual(info['reported_bytes_used'], 3)
@mock.patch('os.listdir')
def test_listdir_with_exception(self, mock_listdir):
e = OSError('permission_denied')
mock_listdir.side_effect = e
cu = self._get_container_updater()
paths = cu.get_paths()
self.assertEqual(paths, [])
log_lines = self.logger.get_lines_for_level('error')
msg = ('ERROR: Failed to get paths to drive partitions: '
'permission_denied')
self.assertEqual(log_lines[0], msg)
@mock.patch('os.listdir', return_value=['foo', 'bar'])
def test_listdir_without_exception(self, mock_listdir):
cu = self._get_container_updater()
path = cu._listdir('foo/bar/')
self.assertEqual(path, ['foo', 'bar'])
log_lines = self.logger.get_lines_for_level('error')
self.assertEqual(len(log_lines), 0)
def test_unicode(self):
cu = self._get_container_updater()
containers_dir = os.path.join(self.sda1, DATADIR)
os.mkdir(containers_dir)
subdir = os.path.join(containers_dir, 'subdir')
os.mkdir(subdir)
cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
container='\xce\xa9')
cb.initialize(normalize_timestamp(1), 0)
obj_name = u'\N{GREEK CAPITAL LETTER OMEGA}'
if six.PY2:
obj_name = obj_name.encode('utf-8')
cb.put_object(obj_name, normalize_timestamp(2), 3, 'text/plain',
'68b329da9893e34099c7d8ad5cb9c940')
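        # minimal account server that accepts the update without inspecting it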
def accept(sock, addr):
try:
with Timeout(3):
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write(b'HTTP/1.1 201 OK\r\nContent-Length: 0\r\n\r\n')
out.flush()
inc.read()
except BaseException as err:
import traceback
traceback.print_exc()
return err
return None
bindsock = listen_zero()
def spawn_accepts():
events = []
for _junk in range(2):
with Timeout(3):
sock, addr = bindsock.accept()
events.append(spawn(accept, sock, addr))
return events
spawned = spawn(spawn_accepts)
for dev in cu.get_account_ring().devs:
if dev is not None:
dev['replication_port'] = bindsock.getsockname()[1]
cu.run_once()
with Timeout(5):
for event in spawned.wait():
err = event.wait()
if err:
raise err
info = cb.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 3)
self.assertEqual(info['reported_object_count'], 1)
self.assertEqual(info['reported_bytes_used'], 3)
def test_old_style_shard_container(self):
cu = self._get_container_updater()
cu.run_once()
containers_dir = os.path.join(self.sda1, DATADIR)
os.mkdir(containers_dir)
cu.run_once()
self.assertTrue(os.path.exists(containers_dir))
subdir = os.path.join(containers_dir, 'subdir')
os.mkdir(subdir)
cb = ContainerBroker(os.path.join(subdir, 'hash.db'),
account='.shards_a', container='c')
cb.initialize(normalize_timestamp(1), 0)
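        # old-style shards record their root container under the 'Root'
        # sysmeta key, which marks this broker as a non-root container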
cb.set_sharding_sysmeta('Root', 'a/c')
self.assertFalse(cb.is_root_container())
cu.run_once()
info = cb.get_info()
self.assertEqual(info['object_count'], 0)
self.assertEqual(info['bytes_used'], 0)
self.assertEqual(info['reported_put_timestamp'], '0')
self.assertEqual(info['reported_delete_timestamp'], '0')
self.assertEqual(info['reported_object_count'], 0)
self.assertEqual(info['reported_bytes_used'], 0)
cb.put_object('o', normalize_timestamp(2), 3, 'text/plain',
'68b329da9893e34099c7d8ad5cb9c940')
# Fake us having already reported *bad* stats under swift 2.18.0
cb.reported('0', '0', 1, 3)
# Should fail with a bunch of connection-refused
cu.run_once()
info = cb.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 3)
self.assertEqual(info['reported_put_timestamp'], '0')
self.assertEqual(info['reported_delete_timestamp'], '0')
self.assertEqual(info['reported_object_count'], 1)
self.assertEqual(info['reported_bytes_used'], 3)
def accept(sock, addr, return_code):
try:
with Timeout(3):
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write(b'HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
return_code)
out.flush()
self.assertEqual(inc.readline(),
b'PUT /sda1/2/.shards_a/c HTTP/1.1\r\n')
headers = {}
line = inc.readline()
while line and line != b'\r\n':
headers[line.split(b':')[0].lower()] = \
line.split(b':')[1].strip()
line = inc.readline()
self.assertIn(b'x-put-timestamp', headers)
self.assertIn(b'x-delete-timestamp', headers)
self.assertIn(b'x-object-count', headers)
self.assertIn(b'x-bytes-used', headers)
except BaseException as err:
import traceback
traceback.print_exc()
return err
return None
bindsock = listen_zero()
def spawn_accepts():
events = []
for _junk in range(2):
sock, addr = bindsock.accept()
events.append(spawn(accept, sock, addr, 201))
return events
spawned = spawn(spawn_accepts)
for dev in cu.get_account_ring().devs:
if dev is not None:
dev['replication_port'] = bindsock.getsockname()[1]
cu.run_once()
with Timeout(5):
for event in spawned.wait():
err = event.wait()
if err:
raise err
info = cb.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 3)
self.assertEqual(info['reported_put_timestamp'], '0000000001.00000')
self.assertEqual(info['reported_delete_timestamp'], '0')
self.assertEqual(info['reported_object_count'], 0)
self.assertEqual(info['reported_bytes_used'], 0)
def test_shard_container(self):
cu = self._get_container_updater()
cu.run_once()
containers_dir = os.path.join(self.sda1, DATADIR)
os.mkdir(containers_dir)
cu.run_once()
self.assertTrue(os.path.exists(containers_dir))
subdir = os.path.join(containers_dir, 'subdir')
os.mkdir(subdir)
cb = ContainerBroker(os.path.join(subdir, 'hash.db'),
account='.shards_a', container='c')
cb.initialize(normalize_timestamp(1), 0)
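        # newer shards record their root container under the 'Quoted-Root'
        # sysmeta key, which marks this broker as a non-root container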
cb.set_sharding_sysmeta('Quoted-Root', 'a/c')
self.assertFalse(cb.is_root_container())
cu.run_once()
info = cb.get_info()
self.assertEqual(info['object_count'], 0)
self.assertEqual(info['bytes_used'], 0)
self.assertEqual(info['reported_put_timestamp'], '0')
self.assertEqual(info['reported_delete_timestamp'], '0')
self.assertEqual(info['reported_object_count'], 0)
self.assertEqual(info['reported_bytes_used'], 0)
cb.put_object('o', normalize_timestamp(2), 3, 'text/plain',
'68b329da9893e34099c7d8ad5cb9c940')
# Fake us having already reported *bad* stats under swift 2.18.0
cb.reported('0', '0', 1, 3)
# Should fail with a bunch of connection-refused
cu.run_once()
info = cb.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 3)
self.assertEqual(info['reported_put_timestamp'], '0')
self.assertEqual(info['reported_delete_timestamp'], '0')
self.assertEqual(info['reported_object_count'], 1)
self.assertEqual(info['reported_bytes_used'], 3)
def accept(sock, addr, return_code):
try:
with Timeout(3):
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write(b'HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
return_code)
out.flush()
self.assertEqual(inc.readline(),
b'PUT /sda1/2/.shards_a/c HTTP/1.1\r\n')
headers = {}
line = inc.readline()
while line and line != b'\r\n':
headers[line.split(b':')[0].lower()] = \
line.split(b':')[1].strip()
line = inc.readline()
self.assertIn(b'x-put-timestamp', headers)
self.assertIn(b'x-delete-timestamp', headers)
self.assertIn(b'x-object-count', headers)
self.assertIn(b'x-bytes-used', headers)
except BaseException as err:
import traceback
traceback.print_exc()
return err
return None
bindsock = listen_zero()
def spawn_accepts():
events = []
for _junk in range(2):
sock, addr = bindsock.accept()
events.append(spawn(accept, sock, addr, 201))
return events
spawned = spawn(spawn_accepts)
for dev in cu.get_account_ring().devs:
if dev is not None:
dev['replication_port'] = bindsock.getsockname()[1]
cu.run_once()
with Timeout(5):
for event in spawned.wait():
err = event.wait()
if err:
raise err
info = cb.get_info()
self.assertEqual(info['object_count'], 1)
self.assertEqual(info['bytes_used'], 3)
self.assertEqual(info['reported_put_timestamp'], '0000000001.00000')
self.assertEqual(info['reported_delete_timestamp'], '0')
self.assertEqual(info['reported_object_count'], 0)
self.assertEqual(info['reported_bytes_used'], 0)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/container/test_updater.py |
swift-master | test/unit/container/__init__.py |
|
# Copyright (c) 2010-2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
from argparse import Namespace
import eventlet
import os
import shutil
from contextlib import contextmanager
from tempfile import mkdtemp
from uuid import uuid4
import mock
import unittest
from collections import defaultdict
import time
from copy import deepcopy
import six
from swift.common import internal_client
from swift.container import replicator
from swift.container.backend import ContainerBroker, UNSHARDED, SHARDING, \
SHARDED, DATADIR
from swift.container.sharder import ContainerSharder, sharding_enabled, \
CleavingContext, DEFAULT_SHARDER_CONF, finalize_shrinking, \
find_shrinking_candidates, process_compactible_shard_sequences, \
find_compactible_shard_sequences, is_shrinking_candidate, \
is_sharding_candidate, find_paths, rank_paths, ContainerSharderConf, \
find_paths_with_gaps, combine_shard_ranges, find_overlapping_ranges, \
update_own_shard_range_stats
from swift.common.utils import ShardRange, Timestamp, hash_path, \
encode_timestamps, parse_db_filename, quorum_size, Everything, md5, \
ShardName
from test import annotate_failure
from test.debug_logger import debug_logger
from test.unit import FakeRing, make_timestamp_iter, unlink_files, \
mocked_http_conn, mock_timestamp_now, mock_timestamp_now_with_iter, \
attach_fake_replication_rpc
class BaseTestSharder(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
self.ts_iter = make_timestamp_iter()
self.logger = debug_logger('sharder-test')
def tearDown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
def _assert_shard_ranges_equal(self, expected, actual):
self.assertEqual([dict(sr) for sr in expected],
[dict(sr) for sr in actual])
def _make_broker(self, account='a', container='c', epoch=None,
device='sda', part=0, hash_=None, put_timestamp=None):
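        # default the db hash to the md5 of the container name and place the
        # db under the usual <device>/containers/<part>/<suffix>/<hash> layout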
hash_ = hash_ or md5(
container.encode('utf-8'), usedforsecurity=False).hexdigest()
datadir = os.path.join(
self.tempdir, device, 'containers', str(part), hash_[-3:], hash_)
if epoch:
filename = '%s_%s.db' % (hash_, epoch)
else:
filename = hash_ + '.db'
db_file = os.path.join(datadir, filename)
broker = ContainerBroker(
db_file, account=account, container=container,
logger=self.logger)
broker.initialize(put_timestamp=put_timestamp)
return broker
def _make_old_style_sharding_broker(self, account='a', container='c',
shard_bounds=(('', 'middle'),
('middle', ''))):
broker = self._make_broker(account=account, container=container)
broker.set_sharding_sysmeta('Root', 'a/c')
old_db_id = broker.get_info()['id']
broker.enable_sharding(next(self.ts_iter))
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.CLEAVED)
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.set_sharding_state())
broker = ContainerBroker(broker.db_file, account='a', container='c')
self.assertNotEqual(old_db_id, broker.get_info()['id']) # sanity check
return broker
def _make_sharding_broker(self, account='a', container='c',
shard_bounds=(('', 'middle'), ('middle', ''))):
broker = self._make_broker(account=account, container=container)
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
old_db_id = broker.get_info()['id']
broker.enable_sharding(next(self.ts_iter))
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.CLEAVED)
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.set_sharding_state())
broker = ContainerBroker(broker.db_file, account='a', container='c')
self.assertNotEqual(old_db_id, broker.get_info()['id']) # sanity check
return broker
def _make_shrinking_broker(self, account='.shards_a', container='shard_c',
lower='here', upper='there', objects=None):
# caller should merge any acceptor range(s) into returned broker
broker = self._make_broker(account=account, container=container)
for obj in objects or []:
broker.put_object(*obj)
own_shard_range = ShardRange(
broker.path, next(self.ts_iter), lower, upper,
state=ShardRange.SHRINKING, epoch=next(self.ts_iter))
broker.merge_shard_ranges([own_shard_range])
broker.set_sharding_sysmeta('Root', 'a/c')
self.assertFalse(broker.is_root_container()) # sanity check
self.assertTrue(broker.set_sharding_state())
return broker
def _make_shard_ranges(self, bounds, state=None, object_count=0,
timestamp=Timestamp.now(), **kwargs):
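        # a single state is applied to every range; a list or tuple supplies
        # one state per bound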
if not isinstance(state, (tuple, list)):
state = [state] * len(bounds)
state_iter = iter(state)
return [ShardRange('.shards_a/c_%s_%s' % (upper, index), timestamp,
lower, upper, state=next(state_iter),
object_count=object_count, **kwargs)
for index, (lower, upper) in enumerate(bounds)]
def ts_encoded(self):
# make a unique timestamp string with multiple timestamps encoded;
# use different deltas between component timestamps
timestamps = [next(self.ts_iter) for i in range(4)]
return encode_timestamps(
timestamps[0], timestamps[1], timestamps[3])
class TestSharder(BaseTestSharder):
def _do_test_init(self, conf, expected, use_logger=True):
logger = self.logger if use_logger else None
if logger:
logger.clear()
with mock.patch(
'swift.container.sharder.internal_client.InternalClient') \
as mock_ic:
with mock.patch('swift.common.db_replicator.ring.Ring') \
as mock_ring:
mock_ring.return_value = mock.MagicMock()
mock_ring.return_value.replica_count = 3
sharder = ContainerSharder(conf, logger=logger)
mock_ring.assert_called_once_with(
'/etc/swift', ring_name='container')
for k, v in expected.items():
self.assertTrue(hasattr(sharder, k), 'Missing attr %s' % k)
self.assertEqual(v, getattr(sharder, k),
'Incorrect value: expected %s=%s but got %s' %
(k, v, getattr(sharder, k)))
return sharder, mock_ic
def test_init(self):
# default values
expected = {
'mount_check': True, 'bind_ip': '0.0.0.0', 'port': 6201,
'per_diff': 1000, 'max_diffs': 100, 'interval': 30,
'databases_per_second': 50,
'cleave_row_batch_size': 10000,
'node_timeout': 10, 'conn_timeout': 5,
'rsync_compress': False,
'rsync_module': '{replication_ip}::container',
'reclaim_age': 86400 * 7,
'shard_container_threshold': 1000000,
'rows_per_shard': 500000,
'shrink_threshold': 100000,
'expansion_limit': 750000,
'cleave_batch_size': 2,
'shard_scanner_batch_size': 10,
'rcache': '/var/cache/swift/container.recon',
'shards_account_prefix': '.shards_',
'auto_shard': False,
'recon_candidates_limit': 5,
'recon_sharded_timeout': 43200,
'shard_replication_quorum': 2,
'existing_shard_replication_quorum': 2,
'max_shrinking': 1,
'max_expanding': -1,
'stats_interval': 3600,
}
sharder, mock_ic = self._do_test_init({}, expected, use_logger=False)
self.assertEqual(
'container-sharder', sharder.logger.logger.name)
mock_ic.assert_called_once_with(
'/etc/swift/internal-client.conf', 'Swift Container Sharder', 3,
use_replication_network=True,
global_conf={'log_name': 'container-sharder-ic'})
# non-default shard_container_threshold influences other defaults
conf = {'shard_container_threshold': 20000000}
expected.update({
'shard_container_threshold': 20000000,
'shrink_threshold': 2000000,
'expansion_limit': 15000000,
'rows_per_shard': 10000000
})
sharder, mock_ic = self._do_test_init(conf, expected)
mock_ic.assert_called_once_with(
'/etc/swift/internal-client.conf', 'Swift Container Sharder', 3,
use_replication_network=True,
global_conf={'log_name': 'container-sharder-ic'})
# non-default values
conf = {
'mount_check': False, 'bind_ip': '10.11.12.13', 'bind_port': 62010,
'per_diff': 2000, 'max_diffs': 200, 'interval': 60,
'databases_per_second': 5,
'cleave_row_batch_size': 3000,
'node_timeout': 20, 'conn_timeout': 1,
'rsync_compress': True,
'rsync_module': '{replication_ip}::container_sda/',
'reclaim_age': 86400 * 14,
'shrink_threshold': 2000000,
'expansion_limit': 17000000,
'shard_container_threshold': 20000000,
'cleave_batch_size': 4,
'shard_scanner_batch_size': 8,
'request_tries': 2,
'internal_client_conf_path': '/etc/swift/my-sharder-ic.conf',
'recon_cache_path': '/var/cache/swift-alt',
'auto_create_account_prefix': '...',
'auto_shard': 'yes',
'recon_candidates_limit': 10,
'recon_sharded_timeout': 7200,
'shard_replication_quorum': 1,
'existing_shard_replication_quorum': 0,
'max_shrinking': 5,
'max_expanding': 4,
'rows_per_shard': 13000000,
'stats_interval': 300,
}
expected = {
'mount_check': False, 'bind_ip': '10.11.12.13', 'port': 62010,
'per_diff': 2000, 'max_diffs': 200, 'interval': 60,
'databases_per_second': 5,
'cleave_row_batch_size': 3000,
'node_timeout': 20, 'conn_timeout': 1,
'rsync_compress': True,
'rsync_module': '{replication_ip}::container_sda',
'reclaim_age': 86400 * 14,
'shard_container_threshold': 20000000,
'rows_per_shard': 13000000,
'shrink_threshold': 2000000,
'expansion_limit': 17000000,
'cleave_batch_size': 4,
'shard_scanner_batch_size': 8,
'rcache': '/var/cache/swift-alt/container.recon',
'shards_account_prefix': '...shards_',
'auto_shard': True,
'recon_candidates_limit': 10,
'recon_sharded_timeout': 7200,
'shard_replication_quorum': 1,
'existing_shard_replication_quorum': 0,
'max_shrinking': 5,
'max_expanding': 4,
'stats_interval': 300,
}
sharder, mock_ic = self._do_test_init(conf, expected)
mock_ic.assert_called_once_with(
'/etc/swift/my-sharder-ic.conf', 'Swift Container Sharder', 2,
use_replication_network=True,
global_conf={'log_name': 'container-sharder-ic'})
self.assertEqual(self.logger.get_lines_for_level('warning'), [
'Option auto_create_account_prefix is deprecated. '
'Configure auto_create_account_prefix under the '
'swift-constraints section of swift.conf. This option '
'will be ignored in a future release.'])
expected.update({'shard_replication_quorum': 3,
'existing_shard_replication_quorum': 3})
conf.update({'shard_replication_quorum': 4,
'existing_shard_replication_quorum': 4})
self._do_test_init(conf, expected)
warnings = self.logger.get_lines_for_level('warning')
self.assertEqual(warnings[:1], [
'Option auto_create_account_prefix is deprecated. '
'Configure auto_create_account_prefix under the '
'swift-constraints section of swift.conf. This option '
'will be ignored in a future release.'])
self.assertEqual(warnings[1:], [
'shard_replication_quorum of 4 exceeds replica count 3, '
'reducing to 3',
'existing_shard_replication_quorum of 4 exceeds replica count 3, '
'reducing to 3',
])
with self.assertRaises(ValueError) as cm:
self._do_test_init({'shard_shrink_point': 101}, {})
self.assertIn(
'greater than 0, less than 100, not "101"', str(cm.exception))
with self.assertRaises(ValueError) as cm:
self._do_test_init({'shard_shrink_merge_point': 101}, {})
self.assertIn(
'greater than 0, less than 100, not "101"', str(cm.exception))
def test_init_deprecated_options(self):
# percent values applied if absolute values not given
conf = {
'shard_shrink_point': 7, # trumps shrink_threshold
'shard_shrink_merge_point': 95, # trumps expansion_limit
'shard_container_threshold': 20000000,
}
expected = {
'mount_check': True, 'bind_ip': '0.0.0.0', 'port': 6201,
'per_diff': 1000, 'max_diffs': 100, 'interval': 30,
'databases_per_second': 50,
'cleave_row_batch_size': 10000,
'node_timeout': 10, 'conn_timeout': 5,
'rsync_compress': False,
'rsync_module': '{replication_ip}::container',
'reclaim_age': 86400 * 7,
'shard_container_threshold': 20000000,
'rows_per_shard': 10000000,
'shrink_threshold': 1400000,
'expansion_limit': 19000000,
'cleave_batch_size': 2,
'shard_scanner_batch_size': 10,
'rcache': '/var/cache/swift/container.recon',
'shards_account_prefix': '.shards_',
'auto_shard': False,
'recon_candidates_limit': 5,
'shard_replication_quorum': 2,
'existing_shard_replication_quorum': 2,
'max_shrinking': 1,
'max_expanding': -1
}
self._do_test_init(conf, expected)
# absolute values override percent values
conf = {
'shard_shrink_point': 7,
'shrink_threshold': 1300000, # trumps shard_shrink_point
'shard_shrink_merge_point': 95,
'expansion_limit': 17000000, # trumps shard_shrink_merge_point
'shard_container_threshold': 20000000,
}
expected = {
'mount_check': True, 'bind_ip': '0.0.0.0', 'port': 6201,
'per_diff': 1000, 'max_diffs': 100, 'interval': 30,
'databases_per_second': 50,
'cleave_row_batch_size': 10000,
'node_timeout': 10, 'conn_timeout': 5,
'rsync_compress': False,
'rsync_module': '{replication_ip}::container',
'reclaim_age': 86400 * 7,
'shard_container_threshold': 20000000,
'rows_per_shard': 10000000,
'shrink_threshold': 1300000,
'expansion_limit': 17000000,
'cleave_batch_size': 2,
'shard_scanner_batch_size': 10,
'rcache': '/var/cache/swift/container.recon',
'shards_account_prefix': '.shards_',
'auto_shard': False,
'recon_candidates_limit': 5,
'shard_replication_quorum': 2,
'existing_shard_replication_quorum': 2,
'max_shrinking': 1,
'max_expanding': -1
}
self._do_test_init(conf, expected)
def test_init_internal_client_conf_loading_error(self):
with mock.patch('swift.common.db_replicator.ring.Ring') \
as mock_ring:
mock_ring.return_value = mock.MagicMock()
mock_ring.return_value.replica_count = 3
with self.assertRaises(SystemExit) as cm:
ContainerSharder(
{'internal_client_conf_path':
os.path.join(self.tempdir, 'nonexistent')})
self.assertIn('Unable to load internal client', str(cm.exception))
with mock.patch('swift.common.db_replicator.ring.Ring') \
as mock_ring:
mock_ring.return_value = mock.MagicMock()
mock_ring.return_value.replica_count = 3
with mock.patch(
'swift.container.sharder.internal_client.InternalClient',
side_effect=Exception('kaboom')):
with self.assertRaises(Exception) as cm:
ContainerSharder({})
self.assertIn('kaboom', str(cm.exception))
def test_init_internal_client_log_name(self):
def _do_test_init_ic_log_name(conf, exp_internal_client_log_name):
with mock.patch(
'swift.container.sharder.internal_client.InternalClient') \
as mock_ic:
with mock.patch('swift.common.db_replicator.ring.Ring') \
as mock_ring:
mock_ring.return_value = mock.MagicMock()
mock_ring.return_value.replica_count = 3
ContainerSharder(conf)
mock_ic.assert_called_once_with(
'/etc/swift/internal-client.conf',
'Swift Container Sharder', 3,
global_conf={'log_name': exp_internal_client_log_name},
use_replication_network=True)
_do_test_init_ic_log_name({}, 'container-sharder-ic')
_do_test_init_ic_log_name({'log_name': 'container-sharder-6021'},
'container-sharder-6021-ic')
def test_log_broker(self):
broker = self._make_broker(container='c@d')
def do_test(level):
with self._mock_sharder() as sharder:
func = getattr(sharder, level)
func(broker, 'bonjour %s %s', 'mes', 'amis')
func(broker, 'hello my %s', 'friend%04ds')
func(broker, 'greetings friend%04ds')
self.assertEqual(
['bonjour mes amis, path: a/c%40d, db: ' + broker.db_file,
'hello my friend%04ds, path: a/c%40d, db: ' + broker.db_file,
'greetings friend%04ds, path: a/c%40d, db: ' + broker.db_file
], sharder.logger.get_lines_for_level(level))
for log_level, lines in sharder.logger.all_log_lines().items():
if log_level == level:
continue
else:
self.assertFalse(lines)
do_test('debug')
do_test('info')
do_test('warning')
do_test('error')
def test_log_broker_exception(self):
broker = self._make_broker()
with self._mock_sharder() as sharder:
try:
raise ValueError('test')
except ValueError as err:
sharder.exception(broker, 'exception: %s', err)
self.assertEqual(
['exception: test, path: a/c, db: %s: ' % broker.db_file],
sharder.logger.get_lines_for_level('error'))
for log_level, lines in sharder.logger.all_log_lines().items():
if log_level == 'error':
continue
else:
self.assertFalse(lines)
def test_log_broker_levels(self):
# verify that the broker is not queried if the log level is not enabled
broker = self._make_broker()
# erase cached properties...
broker.account = broker.container = None
with self._mock_sharder() as sharder:
with mock.patch.object(sharder.logger, 'isEnabledFor',
return_value=False):
sharder.debug(broker, 'test')
sharder.info(broker, 'test')
sharder.warning(broker, 'test')
sharder.error(broker, 'test')
# cached properties have not been set...
self.assertIsNone(broker.account)
self.assertIsNone(broker.container)
self.assertFalse(sharder.logger.all_log_lines())
def test_log_broker_exception_while_logging(self):
broker = self._make_broker()
def do_test(level):
with self._mock_sharder() as sharder:
func = getattr(sharder, level)
with mock.patch.object(broker, '_populate_instance_cache',
side_effect=Exception()):
func(broker, 'bonjour %s %s', 'mes', 'amis')
broker._db_files = None
with mock.patch.object(broker, 'reload_db_files',
side_effect=Exception()):
func(broker, 'bonjour %s %s', 'mes', 'amis')
self.assertEqual(
['bonjour mes amis, path: , db: %s' % broker.db_file,
'bonjour mes amis, path: a/c, db: '],
sharder.logger.get_lines_for_level(level))
for log_level, lines in sharder.logger.all_log_lines().items():
if log_level == level:
continue
else:
self.assertFalse(lines)
do_test('debug')
do_test('info')
do_test('warning')
do_test('error')
def _assert_stats(self, expected, sharder, category):
# assertEqual doesn't work with a stats defaultdict so copy to a dict
# before comparing
stats = sharder.stats['sharding'][category]
actual = {}
for k, v in expected.items():
actual[k] = stats[k]
self.assertEqual(expected, actual)
return stats
def _assert_recon_stats(self, expected, sharder, category):
with open(sharder.rcache, 'rb') as fd:
recon = json.load(fd)
stats = recon['sharding_stats']['sharding'].get(category)
self.assertEqual(expected, stats)
def test_increment_stats(self):
with self._mock_sharder() as sharder:
sharder._increment_stat('visited', 'success')
sharder._increment_stat('visited', 'success')
sharder._increment_stat('visited', 'failure')
sharder._increment_stat('visited', 'completed')
sharder._increment_stat('cleaved', 'success')
expected = {'success': 2,
'failure': 1,
'completed': 1}
self._assert_stats(expected, sharder, 'visited')
self._assert_stats({'success': 1}, sharder, 'cleaved')
def test_increment_stats_with_statsd(self):
with self._mock_sharder() as sharder:
sharder._increment_stat('visited', 'success', statsd=True)
sharder._increment_stat('visited', 'success', statsd=True)
sharder._increment_stat('visited', 'failure', statsd=True)
sharder._increment_stat('visited', 'failure', statsd=False)
sharder._increment_stat('visited', 'completed')
expected = {'success': 2,
'failure': 2,
'completed': 1}
self._assert_stats(expected, sharder, 'visited')
counts = sharder.logger.statsd_client.get_stats_counts()
self.assertEqual(2, counts.get('visited_success'))
self.assertEqual(1, counts.get('visited_failure'))
self.assertIsNone(counts.get('visited_completed'))
def test_update_stat(self):
with self._mock_sharder() as sharder:
sharder._update_stat('scanned', 'found', step=4)
self._assert_stats({'found': 4}, sharder, 'scanned')
with self._mock_sharder() as sharder:
sharder._update_stat('scanned', 'found', step=4)
sharder._update_stat('misplaced', 'placed', step=456, statsd=True)
self._assert_stats({'found': 4}, sharder, 'scanned')
self._assert_stats({'placed': 456}, sharder, 'misplaced')
self.assertEqual({'misplaced_placed': 456},
sharder.logger.statsd_client.get_stats_counts())
def test_run_forever(self):
conf = {'recon_cache_path': self.tempdir,
'devices': self.tempdir}
with self._mock_sharder(conf) as sharder:
sharder._check_node = lambda node: os.path.join(
sharder.conf['devices'], node['device'])
sharder.logger.clear()
brokers = []
for container in ('c1', 'c2'):
broker = self._make_broker(
container=container, hash_=container + 'hash',
device=sharder.ring.devs[0]['device'], part=0)
broker.update_metadata({'X-Container-Sysmeta-Sharding':
('true', next(self.ts_iter).internal)})
brokers.append(broker)
fake_stats = {
'scanned': {'attempted': 1, 'success': 1, 'failure': 0,
'found': 2, 'min_time': 99, 'max_time': 123},
'created': {'attempted': 1, 'success': 1, 'failure': 1},
'cleaved': {'attempted': 1, 'success': 1, 'failure': 0,
'min_time': 0.01, 'max_time': 1.3},
'misplaced': {'attempted': 1, 'success': 1, 'failure': 0,
'found': 1, 'placed': 1, 'unplaced': 0},
'audit_root': {'attempted': 5, 'success': 4, 'failure': 1,
'num_overlap': 0, "has_overlap": 0},
'audit_shard': {'attempted': 2, 'success': 2, 'failure': 0},
}
# NB these are time increments not absolute times...
fake_periods = [1, 2, 3, 3600, 4, 15, 15, 0]
fake_periods_iter = iter(fake_periods)
recon_data = []
fake_process_broker_calls = []
def mock_dump_recon_cache(data, *args):
recon_data.append(deepcopy(data))
with mock.patch('swift.container.sharder.time.time') as fake_time:
def fake_process_broker(broker, *args, **kwargs):
# increment time and inject some fake stats
fake_process_broker_calls.append((broker, args, kwargs))
try:
fake_time.return_value += next(fake_periods_iter)
except StopIteration:
# bail out
fake_time.side_effect = Exception('Test over')
sharder.stats['sharding'].update(fake_stats)
with mock.patch(
'swift.container.sharder.time.sleep') as mock_sleep:
with mock.patch(
'swift.container.sharder.is_sharding_candidate',
return_value=True):
with mock.patch(
'swift.container.sharder.dump_recon_cache',
mock_dump_recon_cache):
fake_time.return_value = next(fake_periods_iter)
sharder._is_sharding_candidate = lambda x: True
sharder._process_broker = fake_process_broker
with self.assertRaises(Exception) as cm:
sharder.run_forever()
self.assertEqual('Test over', str(cm.exception))
        # four cycles are started, two brokers visited per cycle, but the
        # fourth cycle never completes
self.assertEqual(8, len(fake_process_broker_calls))
# expect initial random sleep then one sleep between first and
# second pass
self.assertEqual(2, mock_sleep.call_count)
self.assertLessEqual(mock_sleep.call_args_list[0][0][0], 30)
self.assertLessEqual(mock_sleep.call_args_list[1][0][0],
30 - fake_periods[0])
lines = sharder.logger.get_lines_for_level('info')
categories = ('visited', 'scanned', 'created', 'cleaved',
'misplaced', 'audit_root', 'audit_shard')
def check_categories(start_time):
for category in categories:
line = lines.pop(0)
self.assertIn('Since %s' % time.ctime(start_time), line)
self.assertIn(category, line)
for k, v in fake_stats.get(category, {}).items():
self.assertIn('%s:%s' % (k, v), line)
def check_logs(cycle_time, start_time,
expect_periodic_stats=False):
self.assertIn('Container sharder cycle starting', lines.pop(0))
check_categories(start_time)
if expect_periodic_stats:
check_categories(start_time)
self.assertIn('Container sharder cycle completed: %.02fs' %
cycle_time, lines.pop(0))
check_logs(sum(fake_periods[1:3]), fake_periods[0])
check_logs(sum(fake_periods[3:5]), sum(fake_periods[:3]),
expect_periodic_stats=True)
check_logs(sum(fake_periods[5:7]), sum(fake_periods[:5]))
# final cycle start but then exception pops to terminate test
self.assertIn('Container sharder cycle starting', lines.pop(0))
self.assertFalse(lines)
lines = sharder.logger.get_lines_for_level('error')
self.assertIn(
'Unhandled exception while dumping progress', lines[0])
self.assertIn('path: a/c', lines[0]) # match one of the brokers
self.assertIn('Test over', lines[0])
def check_recon(data, time, last, expected_stats):
self.assertEqual(time, data['sharding_time'])
self.assertEqual(last, data['sharding_last'])
self.assertEqual(
expected_stats, dict(data['sharding_stats']['sharding']))
def stats_for_candidate(broker):
return {'object_count': 0,
'account': broker.account,
'meta_timestamp': mock.ANY,
'container': broker.container,
'file_size': os.stat(broker.db_file).st_size,
'path': broker.db_file,
'root': broker.path,
'node_index': 0}
self.assertEqual(4, len(recon_data))
# stats report at end of first cycle
fake_stats.update({'visited': {'attempted': 2, 'skipped': 0,
'success': 2, 'failure': 0,
'completed': 0}})
fake_stats.update({
'sharding_candidates': {
'found': 2,
'top': [stats_for_candidate(call[0])
for call in fake_process_broker_calls[:2]]
}
})
fake_stats.update({
'shrinking_candidates': {
'found': 0,
'top': []
}
})
check_recon(recon_data[0], sum(fake_periods[1:3]),
sum(fake_periods[:3]), fake_stats)
# periodic stats report after first broker has been visited during
# second cycle - one candidate identified so far this cycle
fake_stats.update({'visited': {'attempted': 1, 'skipped': 0,
'success': 1, 'failure': 0,
'completed': 0}})
fake_stats.update({
'sharding_candidates': {
'found': 1,
'top': [stats_for_candidate(call[0])
for call in fake_process_broker_calls[2:3]]
}
})
check_recon(recon_data[1], fake_periods[3],
sum(fake_periods[:4]), fake_stats)
# stats report at end of second cycle - both candidates reported
fake_stats.update({'visited': {'attempted': 2, 'skipped': 0,
'success': 2, 'failure': 0,
'completed': 0}})
fake_stats.update({
'sharding_candidates': {
'found': 2,
'top': [stats_for_candidate(call[0])
for call in fake_process_broker_calls[2:4]]
}
})
check_recon(recon_data[2], sum(fake_periods[3:5]),
sum(fake_periods[:5]), fake_stats)
# stats report at end of third cycle
fake_stats.update({'visited': {'attempted': 2, 'skipped': 0,
'success': 2, 'failure': 0,
'completed': 0}})
fake_stats.update({
'sharding_candidates': {
'found': 2,
'top': [stats_for_candidate(call[0])
for call in fake_process_broker_calls[4:6]]
}
})
check_recon(recon_data[3], sum(fake_periods[5:7]),
sum(fake_periods[:7]), fake_stats)
def test_one_shard_cycle(self):
conf = {'recon_cache_path': self.tempdir,
'devices': self.tempdir,
'shard_container_threshold': 9}
def fake_ismount(path):
# unmounted_dev is defined from .get_more_nodes() below
unmounted_path = os.path.join(conf['devices'],
unmounted_dev['device'])
if path == unmounted_path:
return False
else:
return True
with self._mock_sharder(conf) as sharder, \
mock.patch('swift.common.utils.ismount', fake_ismount), \
mock.patch('swift.container.sharder.is_local_device',
return_value=True):
sharder.reported = time.time()
brokers = []
device_ids = set(d['id'] for d in sharder.ring.devs)
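            # add an extra, unmounted device to the ring so that the
            # skip-unmounted warning path is exercised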
sharder.ring.max_more_nodes = 1
unmounted_dev = next(sharder.ring.get_more_nodes(1))
unmounted_dev['device'] = 'xxxx'
sharder.ring.add_node(unmounted_dev)
for device_id in device_ids:
brokers.append(self._make_broker(
container='c%s' % device_id, hash_='c%shash' % device_id,
device=sharder.ring.devs[device_id]['device'], part=0))
            # enable sharding on a/c1 and a/c2 (brokers[1:])
for broker in brokers[1:]:
broker.update_metadata({'X-Container-Sysmeta-Sharding':
('true', next(self.ts_iter).internal)})
            # make a/c1 a candidate for sharding (10 objects > threshold of 9)
for i in range(10):
brokers[1].put_object('o%s' % i, next(self.ts_iter).internal,
0, 'text/plain', 'etag', 0)
# check only sharding enabled containers are processed
with mock.patch('eventlet.sleep'), mock.patch.object(
sharder, '_process_broker'
) as mock_process_broker:
sharder._local_device_ids = {'stale_node_id': {}}
sharder._one_shard_cycle(Everything(), Everything())
lines = sharder.logger.get_lines_for_level('warning')
expected = 'Skipping %s as it is not mounted' % \
unmounted_dev['device']
self.assertIn(expected, lines[0])
self.assertEqual(device_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(2, mock_process_broker.call_count)
processed_paths = [call[0][0].path
for call in mock_process_broker.call_args_list]
self.assertEqual({'a/c1', 'a/c2'}, set(processed_paths))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
expected_stats = {'attempted': 2, 'success': 2, 'failure': 0,
'skipped': 1, 'completed': 0}
self._assert_recon_stats(expected_stats, sharder, 'visited')
expected_candidate_stats = {
'found': 1,
'top': [{'object_count': 10, 'account': 'a', 'container': 'c1',
'meta_timestamp': mock.ANY,
'file_size': os.stat(brokers[1].db_file).st_size,
'path': brokers[1].db_file, 'root': 'a/c1',
'node_index': 1}]}
self._assert_recon_stats(
expected_candidate_stats, sharder, 'sharding_candidates')
self._assert_recon_stats(None, sharder, 'sharding_progress')
            # give a/c0 and a/c1 shard ranges so that they report sharding
            # progress, and make a/c2 a sharding candidate
now = next(self.ts_iter)
brokers[0].merge_shard_ranges(
[ShardRange('a/c0', now, '', '', state=ShardRange.SHARDING),
ShardRange('.s_a/1', now, '', 'b', state=ShardRange.ACTIVE),
ShardRange('.s_a/2', now, 'b', 'c', state=ShardRange.CLEAVED),
ShardRange('.s_a/3', now, 'c', 'd', state=ShardRange.CREATED),
ShardRange('.s_a/4', now, 'd', 'e', state=ShardRange.CREATED),
ShardRange('.s_a/5', now, 'e', '', state=ShardRange.FOUND)])
brokers[1].merge_shard_ranges(
[ShardRange('a/c1', now, '', '', state=ShardRange.SHARDING),
ShardRange('.s_a/6', now, '', 'b', state=ShardRange.ACTIVE),
ShardRange('.s_a/7', now, 'b', 'c', state=ShardRange.ACTIVE),
ShardRange('.s_a/8', now, 'c', 'd', state=ShardRange.CLEAVED),
ShardRange('.s_a/9', now, 'd', 'e', state=ShardRange.CREATED),
ShardRange('.s_a/0', now, 'e', '', state=ShardRange.CREATED)])
for i in range(11):
brokers[2].put_object('o%s' % i, next(self.ts_iter).internal,
0, 'text/plain', 'etag', 0)
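            # make processing of a/c1 raise so that the error handling and
            # failure stats are exercised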
def mock_processing(broker, node, part):
if broker.path == 'a/c1':
raise Exception('kapow!')
elif broker.path not in ('a/c0', 'a/c2'):
raise BaseException("I don't know how to handle a broker "
"for %s" % broker.path)
# check exceptions are handled
sharder.logger.clear()
with mock.patch('eventlet.sleep'), mock.patch.object(
sharder, '_process_broker', side_effect=mock_processing
) as mock_process_broker:
sharder._local_device_ids = {'stale_node_id': {}}
sharder._one_shard_cycle(Everything(), Everything())
lines = sharder.logger.get_lines_for_level('warning')
expected = 'Skipping %s as it is not mounted' % \
unmounted_dev['device']
self.assertIn(expected, lines[0])
self.assertEqual(device_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(3, mock_process_broker.call_count)
processed_paths = [call[0][0].path
for call in mock_process_broker.call_args_list]
self.assertEqual({'a/c0', 'a/c1', 'a/c2'}, set(processed_paths))
lines = sharder.logger.get_lines_for_level('error')
self.assertIn('Unhandled exception while processing', lines[0])
self.assertIn('path: a/c', lines[0]) # match one of the brokers
self.assertFalse(lines[1:])
sharder.logger.clear()
expected_stats = {'attempted': 3, 'success': 2, 'failure': 1,
'skipped': 0, 'completed': 0}
self._assert_recon_stats(expected_stats, sharder, 'visited')
expected_candidate_stats = {
'found': 1,
'top': [{'object_count': 11, 'account': 'a', 'container': 'c2',
'meta_timestamp': mock.ANY,
'file_size': os.stat(brokers[1].db_file).st_size,
'path': brokers[2].db_file, 'root': 'a/c2',
'node_index': 2}]}
self._assert_recon_stats(
expected_candidate_stats, sharder, 'sharding_candidates')
expected_in_progress_stats = {
'all': [{'object_count': 0, 'account': 'a', 'container': 'c0',
'meta_timestamp': mock.ANY,
'file_size': os.stat(brokers[0].db_file).st_size,
'path': brokers[0].db_file, 'root': 'a/c0',
'node_index': 0,
'found': 1, 'created': 2, 'cleaved': 1, 'active': 1,
'state': 'sharding', 'db_state': 'unsharded',
'error': None},
{'object_count': 10, 'account': 'a', 'container': 'c1',
'meta_timestamp': mock.ANY,
'file_size': os.stat(brokers[1].db_file).st_size,
'path': brokers[1].db_file, 'root': 'a/c1',
'node_index': 1,
'found': 0, 'created': 2, 'cleaved': 1, 'active': 2,
'state': 'sharding', 'db_state': 'unsharded',
'error': 'kapow!'}]}
self._assert_stats(
expected_in_progress_stats, sharder, 'sharding_in_progress')
# check that candidates and in progress stats don't stick in recon
own_shard_range = brokers[0].get_own_shard_range()
own_shard_range.state = ShardRange.ACTIVE
brokers[0].merge_shard_ranges([own_shard_range])
for i in range(10):
brokers[1].delete_object(
'o%s' % i, next(self.ts_iter).internal)
with mock.patch('eventlet.sleep'), mock.patch.object(
sharder, '_process_broker'
) as mock_process_broker:
sharder._local_device_ids = {999: {}}
sharder._one_shard_cycle(Everything(), Everything())
self.assertEqual(device_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(3, mock_process_broker.call_count)
processed_paths = [call[0][0].path
for call in mock_process_broker.call_args_list]
self.assertEqual({'a/c0', 'a/c1', 'a/c2'}, set(processed_paths))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
expected_stats = {'attempted': 3, 'success': 3, 'failure': 0,
'skipped': 0, 'completed': 0}
self._assert_recon_stats(expected_stats, sharder, 'visited')
self._assert_recon_stats(
expected_candidate_stats, sharder, 'sharding_candidates')
self._assert_recon_stats(None, sharder, 'sharding_progress')
            # let's progress the first broker, brokers[0] (a/c0)
brokers[0].enable_sharding(next(self.ts_iter))
brokers[0].set_sharding_state()
shard_ranges = brokers[0].get_shard_ranges()
for sr in shard_ranges[:-1]:
sr.update_state(ShardRange.CLEAVED)
brokers[0].merge_shard_ranges(shard_ranges)
with mock.patch('eventlet.sleep'), mock.patch.object(
sharder, '_process_broker'
) as mock_process_broker:
sharder._local_device_ids = {999: {}}
sharder._one_shard_cycle(Everything(), Everything())
expected_in_progress_stats = {
'all': [{'object_count': 0, 'account': 'a', 'container': 'c0',
'meta_timestamp': mock.ANY,
'file_size': os.stat(brokers[0].db_file).st_size,
'path': brokers[0].db_file, 'root': 'a/c0',
'node_index': 0,
'found': 1, 'created': 0, 'cleaved': 3, 'active': 1,
'state': 'sharding', 'db_state': 'sharding',
'error': None},
{'object_count': 0, 'account': 'a', 'container': 'c1',
'meta_timestamp': mock.ANY,
'file_size': os.stat(brokers[1].db_file).st_size,
'path': brokers[1].db_file, 'root': 'a/c1',
'node_index': 1,
'found': 0, 'created': 2, 'cleaved': 1, 'active': 2,
'state': 'sharding', 'db_state': 'unsharded',
'error': None}]}
self._assert_stats(
expected_in_progress_stats, sharder, 'sharding_in_progress')
            # Now complete sharding of the first broker, brokers[0].
shard_ranges[-1].update_state(ShardRange.CLEAVED)
own_sr = brokers[0].get_own_shard_range()
own_sr.update_state(ShardRange.SHARDED)
brokers[0].merge_shard_ranges(shard_ranges + [own_sr])
# make and complete a cleave context, this is used for the
# recon_sharded_timeout timer.
cxt = CleavingContext.load(brokers[0])
cxt.misplaced_done = cxt.cleaving_done = True
ts_now = next(self.ts_iter)
with mock_timestamp_now(ts_now):
cxt.store(brokers[0])
self.assertTrue(brokers[0].set_sharded_state())
with mock.patch('eventlet.sleep'), \
mock.patch.object(sharder, '_process_broker') \
as mock_process_broker, mock_timestamp_now(ts_now):
sharder._local_device_ids = {999: {}}
sharder._one_shard_cycle(Everything(), Everything())
expected_in_progress_stats = {
'all': [{'object_count': 0, 'account': 'a', 'container': 'c0',
'meta_timestamp': mock.ANY,
'file_size': os.stat(brokers[0].db_file).st_size,
'path': brokers[0].db_file, 'root': 'a/c0',
'node_index': 0,
'found': 0, 'created': 0, 'cleaved': 4, 'active': 1,
'state': 'sharded', 'db_state': 'sharded',
'error': None},
{'object_count': 0, 'account': 'a', 'container': 'c1',
'meta_timestamp': mock.ANY,
'file_size': os.stat(brokers[1].db_file).st_size,
'path': brokers[1].db_file, 'root': 'a/c1',
'node_index': 1,
'found': 0, 'created': 2, 'cleaved': 1, 'active': 2,
'state': 'sharding', 'db_state': 'unsharded',
'error': None}]}
self._assert_stats(
expected_in_progress_stats, sharder, 'sharding_in_progress')
# one more cycle at recon_sharded_timeout seconds into the
# future to check that the completed broker is still reported
ts_now = Timestamp(ts_now.timestamp +
sharder.recon_sharded_timeout)
with mock.patch('eventlet.sleep'), \
mock.patch.object(sharder, '_process_broker') \
as mock_process_broker, mock_timestamp_now(ts_now):
sharder._local_device_ids = {999: {}}
sharder._one_shard_cycle(Everything(), Everything())
self._assert_stats(
expected_in_progress_stats, sharder, 'sharding_in_progress')
# when we move recon_sharded_timeout + 1 seconds into the future,
# broker 1 will be removed from the progress report
ts_now = Timestamp(ts_now.timestamp +
sharder.recon_sharded_timeout + 1)
with mock.patch('eventlet.sleep'), \
mock.patch.object(sharder, '_process_broker') \
as mock_process_broker, mock_timestamp_now(ts_now):
sharder._local_device_ids = {999: {}}
sharder._one_shard_cycle(Everything(), Everything())
expected_in_progress_stats = {
'all': [{'object_count': 0, 'account': 'a', 'container': 'c1',
'meta_timestamp': mock.ANY,
'file_size': os.stat(brokers[1].db_file).st_size,
'path': brokers[1].db_file, 'root': 'a/c1',
'node_index': 1,
'found': 0, 'created': 2, 'cleaved': 1, 'active': 2,
'state': 'sharding', 'db_state': 'unsharded',
'error': None}]}
self._assert_stats(
expected_in_progress_stats, sharder, 'sharding_in_progress')
def test_one_shard_cycle_no_containers(self):
conf = {'recon_cache_path': self.tempdir,
'devices': self.tempdir,
'mount_check': False}
with self._mock_sharder(conf) as sharder:
for dev in sharder.ring.devs:
os.mkdir(os.path.join(self.tempdir, dev['device']))
with mock.patch('swift.container.sharder.is_local_device',
return_value=True):
sharder._one_shard_cycle(Everything(), Everything())
self.assertEqual([], sharder.logger.get_lines_for_level('warning'))
self.assertIn('Found no containers directories',
sharder.logger.get_lines_for_level('info'))
with self._mock_sharder(conf) as sharder:
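            # note: 'dev' here is the last device from the loop above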
os.mkdir(os.path.join(self.tempdir, dev['device'], 'containers'))
with mock.patch('swift.container.sharder.is_local_device',
return_value=True):
sharder._one_shard_cycle(Everything(), Everything())
self.assertEqual([], sharder.logger.get_lines_for_level('warning'))
self.assertNotIn('Found no containers directories',
sharder.logger.get_lines_for_level('info'))
def test_ratelimited_roundrobin(self):
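        # verify that roundrobin_datadirs is rate-limited according to
        # databases_per_second, using a fake clock to measure the sleeps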
n_databases = 100
def stub_iter(dirs):
for i in range(n_databases):
yield i, '/srv/node/sda/path/to/container.db', {}
now = time.time()
clock = {
'sleeps': [],
'now': now,
}
def fake_sleep(t):
clock['sleeps'].append(t)
clock['now'] += t
def fake_time():
return clock['now']
with self._mock_sharder({'databases_per_second': 1}) as sharder, \
mock.patch('swift.common.db_replicator.roundrobin_datadirs',
stub_iter), \
mock.patch('time.time', fake_time), \
mock.patch('eventlet.sleep', fake_sleep):
list(sharder.roundrobin_datadirs(None))
        # 100 dbs at 1/s should take ~100s
run_time = sum(clock['sleeps'])
self.assertTrue(97 <= run_time < 100, 'took %s' % run_time)
n_databases = 1000
now = time.time()
clock = {
'sleeps': [],
'now': now,
}
with self._mock_sharder({'databases_per_second': 50}) as sharder, \
mock.patch('swift.common.db_replicator.roundrobin_datadirs',
stub_iter), \
mock.patch('time.time', fake_time), \
mock.patch('eventlet.sleep', fake_sleep):
list(sharder.roundrobin_datadirs(None))
        # 1000 dbs at 50/s should take ~20s
run_time = sum(clock['sleeps'])
self.assertTrue(18 <= run_time < 20, 'took %s' % run_time)
@contextmanager
def _mock_sharder(self, conf=None, replicas=3):
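        # helper: yield a ContainerSharder built with a FakeRing and a mocked
        # internal client; _replicate_object is stubbed to succeed on all
        # replicas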
self.logger.clear()
conf = conf or {}
conf['devices'] = self.tempdir
fake_ring = FakeRing(replicas=replicas, separate_replication=True)
with mock.patch(
'swift.container.sharder.internal_client.InternalClient'):
with mock.patch(
'swift.common.db_replicator.ring.Ring',
return_value=fake_ring):
sharder = ContainerSharder(conf, logger=self.logger)
sharder._local_device_ids = {dev['id']: dev
for dev in fake_ring.devs}
sharder._replicate_object = mock.MagicMock(
return_value=(True, [True] * sharder.ring.replica_count))
yield sharder
def _get_raw_object_records(self, broker):
# use list_objects_iter with no-op transform_func to get back actual
# un-transformed rows with encoded timestamps
return [list(obj) for obj in broker.list_objects_iter(
10, '', '', '', '', include_deleted=None, all_policies=True,
transform_func=lambda record: record)]
def _check_objects(self, expected_objs, shard_dbs):
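        # gather raw object rows from the given shard db(s) and assert that
        # they match the expected objects exactly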
shard_dbs = shard_dbs if isinstance(shard_dbs, list) else [shard_dbs]
shard_objs = []
for shard_db in shard_dbs:
shard_broker = ContainerBroker(shard_db)
shard_objs.extend(self._get_raw_object_records(shard_broker))
expected_objs = [list(obj) for obj in expected_objs]
self.assertEqual(expected_objs, shard_objs)
def _check_shard_range(self, expected, actual):
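        # the actual shard range should match the expected one except that
        # its meta_timestamp should have advanced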
expected_dict = dict(expected)
actual_dict = dict(actual)
self.assertGreater(actual_dict.pop('meta_timestamp'),
expected_dict.pop('meta_timestamp'))
self.assertEqual(expected_dict, actual_dict)
def test_check_node(self):
node = {
'replication_ip': '127.0.0.1',
'replication_port': 5000,
'device': 'd100',
}
with self._mock_sharder() as sharder:
sharder.mount_check = True
sharder.ips = ['127.0.0.1']
sharder.port = 5000
# normal behavior
with mock.patch(
'swift.common.utils.ismount',
lambda *args: True):
r = sharder._check_node(node)
expected = os.path.join(sharder.conf['devices'], node['device'])
self.assertEqual(r, expected)
# test with an unmounted drive
with mock.patch(
'swift.common.utils.ismount',
lambda *args: False):
r = sharder._check_node(node)
self.assertEqual(r, False)
lines = sharder.logger.get_lines_for_level('warning')
expected = 'Skipping %s as it is not mounted' % node['device']
self.assertIn(expected, lines[0])
def test_fetch_shard_ranges_unexpected_response(self):
broker = self._make_broker()
exc = internal_client.UnexpectedResponse(
'Unexpected response: 404', None)
with self._mock_sharder() as sharder:
sharder.int_client.make_request.side_effect = exc
self.assertIsNone(sharder._fetch_shard_ranges(broker))
lines = sharder.logger.get_lines_for_level('warning')
self.assertIn('Unexpected response: 404', lines[0])
self.assertFalse(lines[1:])
def test_fetch_shard_ranges_bad_record_type(self):
def do_test(mock_resp_headers):
with self._mock_sharder() as sharder:
mock_make_request = mock.MagicMock(
return_value=mock.MagicMock(headers=mock_resp_headers))
sharder.int_client.make_request = mock_make_request
self.assertIsNone(sharder._fetch_shard_ranges(broker))
lines = sharder.logger.get_lines_for_level('error')
self.assertIn('unexpected record type', lines[0])
self.assertFalse(lines[1:])
broker = self._make_broker()
do_test({})
do_test({'x-backend-record-type': 'object'})
do_test({'x-backend-record-type': 'disco'})
def test_fetch_shard_ranges_bad_data(self):
def do_test(mock_resp_body):
mock_resp_headers = {'x-backend-record-type': 'shard'}
with self._mock_sharder() as sharder:
mock_make_request = mock.MagicMock(
return_value=mock.MagicMock(headers=mock_resp_headers,
body=mock_resp_body))
sharder.int_client.make_request = mock_make_request
self.assertIsNone(sharder._fetch_shard_ranges(broker))
lines = sharder.logger.get_lines_for_level('error')
self.assertIn('invalid data', lines[0])
self.assertFalse(lines[1:])
broker = self._make_broker()
do_test({})
do_test('')
do_test(json.dumps({}))
do_test(json.dumps([{'account': 'a', 'container': 'c'}]))
def test_fetch_shard_ranges_ok(self):
def do_test(mock_resp_body, params):
mock_resp_headers = {'x-backend-record-type': 'shard'}
with self._mock_sharder() as sharder:
mock_make_request = mock.MagicMock(
return_value=mock.MagicMock(headers=mock_resp_headers,
body=mock_resp_body))
sharder.int_client.make_request = mock_make_request
mock_make_path = mock.MagicMock(return_value='/v1/a/c')
sharder.int_client.make_path = mock_make_path
actual = sharder._fetch_shard_ranges(broker, params=params)
sharder.int_client.make_path.assert_called_once_with('a', 'c')
self.assertFalse(sharder.logger.get_lines_for_level('error'))
return actual, mock_make_request
expected_headers = {'X-Backend-Record-Type': 'shard',
'X-Backend-Include-Deleted': 'False',
'X-Backend-Override-Deleted': 'true'}
broker = self._make_broker()
shard_ranges = self._make_shard_ranges((('', 'm'), ('m', '')))
params = {'format': 'json'}
actual, mock_call = do_test(json.dumps([dict(shard_ranges[0])]),
params={})
mock_call.assert_called_once_with(
'GET', '/v1/a/c', expected_headers, acceptable_statuses=(2,),
params=params)
self._assert_shard_ranges_equal([shard_ranges[0]], actual)
params = {'format': 'json', 'includes': 'thing'}
actual, mock_call = do_test(
json.dumps([dict(sr) for sr in shard_ranges]), params=params)
self._assert_shard_ranges_equal(shard_ranges, actual)
mock_call.assert_called_once_with(
'GET', '/v1/a/c', expected_headers, acceptable_statuses=(2,),
params=params)
params = {'format': 'json',
'end_marker': 'there', 'marker': 'here'}
actual, mock_call = do_test(json.dumps([]), params=params)
self._assert_shard_ranges_equal([], actual)
mock_call.assert_called_once_with(
'GET', '/v1/a/c', expected_headers, acceptable_statuses=(2,),
params=params)
def test_yield_objects(self):
broker = self._make_broker()
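        # 30 objects with alternating deleted flag: 15 live and 15 deleted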
objects = [
('o%02d' % i, self.ts_encoded(), 10, 'text/plain', 'etag_a',
i % 2, 0) for i in range(30)]
for obj in objects:
broker.put_object(*obj)
src_range = ShardRange('dont/care', Timestamp.now())
with self._mock_sharder(conf={}) as sharder:
batches = [b for b, _ in
sharder.yield_objects(broker, src_range)]
self.assertEqual([15, 15], [len(b) for b in batches])
self.assertEqual([[0] * 15, [1] * 15],
[[o['deleted'] for o in b] for b in batches])
# custom batch size
with self._mock_sharder(conf={}) as sharder:
batches = [b for b, _ in
sharder.yield_objects(broker, src_range, batch_size=10)]
self.assertEqual([10, 5, 10, 5], [len(b) for b in batches])
self.assertEqual([[0] * 10, [0] * 5, [1] * 10, [1] * 5],
[[o['deleted'] for o in b] for b in batches])
# restricted source range
src_range = ShardRange('dont/care', Timestamp.now(),
lower='o10', upper='o20')
with self._mock_sharder(conf={}) as sharder:
batches = [b for b, _ in
sharder.yield_objects(broker, src_range)]
self.assertEqual([5, 5], [len(b) for b in batches])
self.assertEqual([[0] * 5, [1] * 5],
[[o['deleted'] for o in b] for b in batches])
# null source range
src_range = ShardRange('dont/care', Timestamp.now(),
lower=ShardRange.MAX)
with self._mock_sharder(conf={}) as sharder:
batches = [b for b, _ in
sharder.yield_objects(broker, src_range)]
self.assertEqual([], batches)
src_range = ShardRange('dont/care', Timestamp.now(),
upper=ShardRange.MIN)
with self._mock_sharder(conf={}) as sharder:
batches = [b for b, _ in
sharder.yield_objects(broker, src_range)]
self.assertEqual([], batches)
def test_yield_objects_to_shard_range_no_objects(self):
# verify that dest_shard_ranges func is not called if the source
# broker has no objects
broker = self._make_broker()
dest_shard_ranges = mock.MagicMock()
src_range = ShardRange('dont/care', Timestamp.now())
with self._mock_sharder(conf={}) as sharder:
batches = [b for b, _ in
sharder.yield_objects_to_shard_range(
broker, src_range, dest_shard_ranges)]
self.assertEqual([], batches)
dest_shard_ranges.assert_not_called()
def test_yield_objects_to_shard_range(self):
broker = self._make_broker()
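        # 30 objects o00..o29 with alternating deleted flag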
objects = [
('o%02d' % i, self.ts_encoded(), 10, 'text/plain', 'etag_a',
i % 2, 0) for i in range(30)]
for obj in objects:
broker.put_object(*obj)
orig_info = broker.get_info()
# yield_objects annotates the info dict...
orig_info['max_row'] = 30
dest_ranges = [
ShardRange('shard/0', Timestamp.now(), upper='o09'),
ShardRange('shard/1', Timestamp.now(), lower='o09', upper='o19'),
ShardRange('shard/2', Timestamp.now(), lower='o19'),
]
# complete overlap of src and dest, multiple batches per dest shard
# range per deleted/not deleted
src_range = ShardRange('dont/care', Timestamp.now())
dest_shard_ranges = mock.MagicMock(return_value=dest_ranges)
with self._mock_sharder(conf={'cleave_row_batch_size': 4}) as sharder:
yielded = [y for y in
sharder.yield_objects_to_shard_range(
broker, src_range, dest_shard_ranges)]
self.assertEqual([dest_ranges[0], dest_ranges[0],
dest_ranges[0], dest_ranges[0],
dest_ranges[1], dest_ranges[1],
dest_ranges[1], dest_ranges[1],
dest_ranges[2], dest_ranges[2],
dest_ranges[2], dest_ranges[2]],
[dest for _, dest, _ in yielded])
self.assertEqual([[o[0] for o in objects[0:8:2]],
[o[0] for o in objects[8:10:2]],
[o[0] for o in objects[1:8:2]],
[o[0] for o in objects[9:10:2]],
[o[0] for o in objects[10:18:2]],
[o[0] for o in objects[18:20:2]],
[o[0] for o in objects[11:18:2]],
[o[0] for o in objects[19:20:2]],
[o[0] for o in objects[20:28:2]],
[o[0] for o in objects[28:30:2]],
[o[0] for o in objects[21:28:2]],
[o[0] for o in objects[29:30:2]]],
[[o['name'] for o in objs] for objs, _, _ in yielded])
self.assertEqual([orig_info] * 12, [info for _, _, info in yielded])
# src narrower than dest
src_range = ShardRange('dont/care', Timestamp.now(),
lower='o15', upper='o25')
dest_shard_ranges = mock.MagicMock(return_value=dest_ranges)
with self._mock_sharder(conf={}) as sharder:
yielded = [y for y in
sharder.yield_objects_to_shard_range(
broker, src_range, dest_shard_ranges)]
self.assertEqual([dest_ranges[1], dest_ranges[1],
dest_ranges[2], dest_ranges[2]],
[dest for _, dest, _ in yielded])
self.assertEqual([[o[0] for o in objects[16:20:2]],
[o[0] for o in objects[17:20:2]],
[o[0] for o in objects[20:26:2]],
[o[0] for o in objects[21:26:2]]],
[[o['name'] for o in objs] for objs, _, _ in yielded])
self.assertEqual([orig_info] * 4, [info for _, _, info in yielded])
# src much narrower than dest
src_range = ShardRange('dont/care', Timestamp.now(),
lower='o15', upper='o18')
dest_shard_ranges = mock.MagicMock(return_value=dest_ranges)
with self._mock_sharder(conf={}) as sharder:
yielded = [y for y in
sharder.yield_objects_to_shard_range(
broker, src_range, dest_shard_ranges)]
self.assertEqual([dest_ranges[1], dest_ranges[1]],
[dest for _, dest, _ in yielded])
self.assertEqual([[o[0] for o in objects[16:19:2]],
[o[0] for o in objects[17:19:2]]],
[[o['name'] for o in objs] for objs, _, _ in yielded])
self.assertEqual([orig_info] * 2, [info for _, _, info in yielded])
# dest narrower than src
src_range = ShardRange('dont/care', Timestamp.now(),
lower='o05', upper='o25')
dest_shard_ranges = mock.MagicMock(return_value=dest_ranges[1:])
with self._mock_sharder(conf={}) as sharder:
yielded = [y for y in
sharder.yield_objects_to_shard_range(
broker, src_range, dest_shard_ranges)]
self.assertEqual([None, None,
dest_ranges[1], dest_ranges[1],
dest_ranges[2], dest_ranges[2]],
[dest for _, dest, _ in yielded])
self.assertEqual([[o[0] for o in objects[6:10:2]],
[o[0] for o in objects[7:10:2]],
[o[0] for o in objects[10:20:2]],
[o[0] for o in objects[11:20:2]],
[o[0] for o in objects[20:26:2]],
[o[0] for o in objects[21:26:2]]],
[[o['name'] for o in objs] for objs, _, _ in yielded])
self.assertEqual([orig_info] * 6, [info for _, _, info in yielded])
# dest much narrower than src
src_range = ShardRange('dont/care', Timestamp.now(),
lower='o05', upper='o25')
dest_shard_ranges = mock.MagicMock(return_value=dest_ranges[1:2])
with self._mock_sharder(conf={}) as sharder:
yielded = [y for y in
sharder.yield_objects_to_shard_range(
broker, src_range, dest_shard_ranges)]
self.assertEqual([None, None,
dest_ranges[1], dest_ranges[1],
None, None],
[dest for _, dest, _ in yielded])
self.assertEqual([[o[0] for o in objects[6:10:2]],
[o[0] for o in objects[7:10:2]],
[o[0] for o in objects[10:20:2]],
[o[0] for o in objects[11:20:2]],
[o[0] for o in objects[20:26:2]],
[o[0] for o in objects[21:26:2]]],
[[o['name'] for o in objs] for objs, _, _ in yielded])
self.assertEqual([orig_info] * 6, [info for _, _, info in yielded])
# no dest, source is entire namespace, multiple batches
src_range = ShardRange('dont/care', Timestamp.now())
dest_shard_ranges = mock.MagicMock(return_value=[])
with self._mock_sharder(conf={'cleave_row_batch_size': 10}) as sharder:
yielded = [y for y in
sharder.yield_objects_to_shard_range(
broker, src_range, dest_shard_ranges)]
self.assertEqual([None] * 4,
[dest for _, dest, _ in yielded])
self.assertEqual([[o[0] for o in objects[:20:2]],
[o[0] for o in objects[20::2]],
[o[0] for o in objects[1:20:2]],
[o[0] for o in objects[21::2]]],
[[o['name'] for o in objs] for objs, _, _ in yielded])
self.assertEqual([orig_info] * 4, [info for _, _, info in yielded])
def _check_cleave_root(self, conf=None):
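        # build a root broker with objects spanning five shard ranges, then
        # drive _cleave through successive passes, checking stats, shard dbs
        # and the CleavingContext after each pass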
broker = self._make_broker()
objects = [
# shard 0
('a', self.ts_encoded(), 10, 'text/plain', 'etag_a', 0, 0),
('here', self.ts_encoded(), 10, 'text/plain', 'etag_here', 0, 0),
# shard 1
('m', self.ts_encoded(), 1, 'text/plain', 'etag_m', 0, 0),
('n', self.ts_encoded(), 2, 'text/plain', 'etag_n', 0, 0),
('there', self.ts_encoded(), 3, 'text/plain', 'etag_there', 0, 0),
# shard 2
('where', self.ts_encoded(), 100, 'text/plain', 'etag_where', 0,
0),
# shard 3
('x', self.ts_encoded(), 0, '', '', 1, 0), # deleted
('y', self.ts_encoded(), 1000, 'text/plain', 'etag_y', 0, 0),
# shard 4
('yyyy', self.ts_encoded(), 14, 'text/plain', 'etag_yyyy', 0, 0),
]
for obj in objects:
broker.put_object(*obj)
initial_root_info = broker.get_info()
broker.enable_sharding(Timestamp.now())
shard_bounds = (('', 'here'), ('here', 'there'),
('there', 'where'), ('where', 'yonder'),
('yonder', ''))
shard_ranges = self._make_shard_ranges(shard_bounds)
expected_shard_dbs = []
for shard_range in shard_ranges:
db_hash = hash_path(shard_range.account, shard_range.container)
expected_shard_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_hash + '.db'))
# used to accumulate stats from sharded dbs
total_shard_stats = {'object_count': 0, 'bytes_used': 0}
# run cleave - no shard ranges, nothing happens
with self._mock_sharder(conf=conf) as sharder:
self.assertFalse(sharder._cleave(broker))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertFalse(context.cleaving_done)
self.assertFalse(context.done())
self.assertEqual('', context.cursor)
self.assertEqual(9, context.cleave_to_row)
self.assertEqual(9, context.max_row)
self.assertEqual(0, context.ranges_done)
self.assertEqual(0, context.ranges_todo)
self.assertEqual(UNSHARDED, broker.get_db_state())
sharder._replicate_object.assert_not_called()
for db in expected_shard_dbs:
with annotate_failure(db):
self.assertFalse(os.path.exists(db))
# run cleave - all shard ranges in found state, nothing happens
broker.merge_shard_ranges(shard_ranges[:4])
self.assertTrue(broker.set_sharding_state())
with self._mock_sharder(conf=conf) as sharder:
self.assertFalse(sharder._cleave(broker))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertFalse(context.cleaving_done)
self.assertFalse(context.done())
self.assertEqual('', context.cursor)
self.assertEqual(9, context.cleave_to_row)
self.assertEqual(9, context.max_row)
self.assertEqual(0, context.ranges_done)
self.assertEqual(4, context.ranges_todo)
self.assertEqual(SHARDING, broker.get_db_state())
sharder._replicate_object.assert_not_called()
for db in expected_shard_dbs:
with annotate_failure(db):
self.assertFalse(os.path.exists(db))
for shard_range in broker.get_shard_ranges():
with annotate_failure(shard_range):
self.assertEqual(ShardRange.FOUND, shard_range.state)
        # move first shard range to CREATED state; it can then be cleaved
shard_ranges[0].update_state(ShardRange.CREATED)
broker.merge_shard_ranges(shard_ranges[:1])
with self._mock_sharder(conf=conf) as sharder:
self.assertFalse(sharder._cleave(broker))
expected = {'attempted': 1, 'success': 1, 'failure': 0,
'min_time': mock.ANY, 'max_time': mock.ANY,
'db_created': 1, 'db_exists': 0}
stats = self._assert_stats(expected, sharder, 'cleaved')
self.assertIsInstance(stats['min_time'], float)
self.assertIsInstance(stats['max_time'], float)
self.assertLessEqual(stats['min_time'], stats['max_time'])
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts().get(
'cleaved_db_created'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'cleaved_db_exists'))
self.assertEqual(SHARDING, broker.get_db_state())
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[0], 0)
shard_broker = ContainerBroker(expected_shard_dbs[0])
shard_own_sr = shard_broker.get_own_shard_range()
self.assertEqual(ShardRange.CLEAVED, shard_own_sr.state)
shard_info = shard_broker.get_info()
total_shard_stats['object_count'] += shard_info['object_count']
total_shard_stats['bytes_used'] += shard_info['bytes_used']
updated_shard_ranges = broker.get_shard_ranges()
self.assertEqual(4, len(updated_shard_ranges))
# update expected state and metadata, check cleaved shard range
shard_ranges[0].bytes_used = 20
shard_ranges[0].object_count = 2
shard_ranges[0].state = ShardRange.CLEAVED
self._check_shard_range(shard_ranges[0], updated_shard_ranges[0])
self._check_objects(objects[:2], expected_shard_dbs[0])
# other shard ranges should be unchanged
for i in range(1, len(shard_ranges)):
with annotate_failure(i):
self.assertFalse(os.path.exists(expected_shard_dbs[i]))
for i in range(1, len(updated_shard_ranges)):
with annotate_failure(i):
self.assertEqual(dict(shard_ranges[i]),
dict(updated_shard_ranges[i]))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertFalse(context.cleaving_done)
self.assertFalse(context.done())
self.assertEqual('here', context.cursor)
self.assertEqual(9, context.cleave_to_row)
self.assertEqual(9, context.max_row)
self.assertEqual(1, context.ranges_done)
self.assertEqual(3, context.ranges_todo)
unlink_files(expected_shard_dbs)
# move more shard ranges to created state
for i in range(1, 4):
shard_ranges[i].update_state(ShardRange.CREATED)
broker.merge_shard_ranges(shard_ranges[1:4])
# replication of next shard range is not sufficiently successful
with self._mock_sharder(conf=conf) as sharder:
quorum = quorum_size(sharder.ring.replica_count)
successes = [True] * (quorum - 1)
fails = [False] * (sharder.ring.replica_count - len(successes))
responses = successes + fails
random.shuffle(responses)
sharder._replicate_object = mock.MagicMock(
side_effect=((False, responses),))
self.assertFalse(sharder._cleave(broker))
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[1], 0)
expected = {'attempted': 1, 'success': 0, 'failure': 1,
'min_time': mock.ANY, 'max_time': mock.ANY,
'db_created': 1, 'db_exists': 0}
self._assert_stats(expected, sharder, 'cleaved')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts().get(
'cleaved_db_created'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'cleaved_db_exists'))
# cleaving state is unchanged
updated_shard_ranges = broker.get_shard_ranges()
self.assertEqual(4, len(updated_shard_ranges))
for i in range(1, len(updated_shard_ranges)):
with annotate_failure(i):
self.assertEqual(dict(shard_ranges[i]),
dict(updated_shard_ranges[i]))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertFalse(context.cleaving_done)
self.assertFalse(context.done())
self.assertEqual('here', context.cursor)
self.assertEqual(9, context.cleave_to_row)
self.assertEqual(9, context.max_row)
self.assertEqual(1, context.ranges_done)
self.assertEqual(3, context.ranges_todo)
# try again, this time replication is sufficiently successful
with self._mock_sharder(conf=conf) as sharder:
successes = [True] * quorum
fails = [False] * (sharder.ring.replica_count - len(successes))
responses1 = successes + fails
responses2 = fails + successes
sharder._replicate_object = mock.MagicMock(
side_effect=((False, responses1), (False, responses2)))
self.assertFalse(sharder._cleave(broker))
expected = {'attempted': 2, 'success': 2, 'failure': 0,
'min_time': mock.ANY, 'max_time': mock.ANY,
'db_created': 1, 'db_exists': 1}
stats = self._assert_stats(expected, sharder, 'cleaved')
self.assertIsInstance(stats['min_time'], float)
self.assertIsInstance(stats['max_time'], float)
self.assertLessEqual(stats['min_time'], stats['max_time'])
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts().get(
'cleaved_db_created'))
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts().get(
'cleaved_db_exists'))
self.assertEqual(SHARDING, broker.get_db_state())
sharder._replicate_object.assert_has_calls(
[mock.call(0, db, 0) for db in expected_shard_dbs[1:3]]
)
for db in expected_shard_dbs[1:3]:
shard_broker = ContainerBroker(db)
shard_own_sr = shard_broker.get_own_shard_range()
self.assertEqual(ShardRange.CLEAVED, shard_own_sr.state)
shard_info = shard_broker.get_info()
total_shard_stats['object_count'] += shard_info['object_count']
total_shard_stats['bytes_used'] += shard_info['bytes_used']
updated_shard_ranges = broker.get_shard_ranges()
self.assertEqual(4, len(updated_shard_ranges))
        # only 2 are cleaved per batch (the default cleave_batch_size)
# update expected state and metadata, check cleaved shard ranges
shard_ranges[1].bytes_used = 6
shard_ranges[1].object_count = 3
shard_ranges[1].state = ShardRange.CLEAVED
shard_ranges[2].bytes_used = 100
shard_ranges[2].object_count = 1
shard_ranges[2].state = ShardRange.CLEAVED
for i in range(0, 3):
with annotate_failure(i):
self._check_shard_range(
shard_ranges[i], updated_shard_ranges[i])
self._check_objects(objects[2:5], expected_shard_dbs[1])
self._check_objects(objects[5:6], expected_shard_dbs[2])
# other shard ranges should be unchanged
self.assertFalse(os.path.exists(expected_shard_dbs[0]))
for i, db in enumerate(expected_shard_dbs[3:], 3):
with annotate_failure(i):
self.assertFalse(os.path.exists(db))
for i, updated_shard_range in enumerate(updated_shard_ranges[3:], 3):
with annotate_failure(i):
self.assertEqual(dict(shard_ranges[i]),
dict(updated_shard_range))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertFalse(context.cleaving_done)
self.assertFalse(context.done())
self.assertEqual('where', context.cursor)
self.assertEqual(9, context.cleave_to_row)
self.assertEqual(9, context.max_row)
self.assertEqual(3, context.ranges_done)
self.assertEqual(1, context.ranges_todo)
unlink_files(expected_shard_dbs)
# run cleave again - should process the fourth range
with self._mock_sharder(conf=conf) as sharder:
self.assertFalse(sharder._cleave(broker))
expected = {'attempted': 1, 'success': 1, 'failure': 0,
'min_time': mock.ANY, 'max_time': mock.ANY,
'db_created': 1, 'db_exists': 0}
stats = self._assert_stats(expected, sharder, 'cleaved')
self.assertIsInstance(stats['min_time'], float)
self.assertIsInstance(stats['max_time'], float)
self.assertLessEqual(stats['min_time'], stats['max_time'])
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts().get(
'cleaved_db_created'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'cleaved_db_exists'))
self.assertEqual(SHARDING, broker.get_db_state())
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[3], 0)
shard_broker = ContainerBroker(expected_shard_dbs[3])
shard_own_sr = shard_broker.get_own_shard_range()
self.assertEqual(ShardRange.CLEAVED, shard_own_sr.state)
shard_info = shard_broker.get_info()
total_shard_stats['object_count'] += shard_info['object_count']
total_shard_stats['bytes_used'] += shard_info['bytes_used']
updated_shard_ranges = broker.get_shard_ranges()
self.assertEqual(4, len(updated_shard_ranges))
shard_ranges[3].bytes_used = 1000
shard_ranges[3].object_count = 1
shard_ranges[3].state = ShardRange.CLEAVED
for i in range(0, 4):
with annotate_failure(i):
self._check_shard_range(
shard_ranges[i], updated_shard_ranges[i])
# NB includes the deleted object
self._check_objects(objects[6:8], expected_shard_dbs[3])
# other shard ranges should be unchanged
for i, db in enumerate(expected_shard_dbs[:3]):
with annotate_failure(i):
self.assertFalse(os.path.exists(db))
self.assertFalse(os.path.exists(expected_shard_dbs[4]))
for i, updated_shard_range in enumerate(updated_shard_ranges[4:], 4):
with annotate_failure(i):
self.assertEqual(dict(shard_ranges[i]),
dict(updated_shard_range))
self.assertFalse(os.path.exists(expected_shard_dbs[4]))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertFalse(context.cleaving_done)
self.assertFalse(context.done())
self.assertEqual('yonder', context.cursor)
self.assertEqual(9, context.cleave_to_row)
self.assertEqual(9, context.max_row)
self.assertEqual(4, context.ranges_done)
self.assertEqual(0, context.ranges_todo)
unlink_files(expected_shard_dbs)
# run cleave - should be a no-op, all existing ranges have been cleaved
with self._mock_sharder(conf=conf) as sharder:
self.assertFalse(sharder._cleave(broker))
self.assertEqual(SHARDING, broker.get_db_state())
sharder._replicate_object.assert_not_called()
# add final shard range - move this to ACTIVE state and update stats to
# simulate another replica having cleaved it and replicated its state
shard_ranges[4].update_state(ShardRange.ACTIVE)
shard_ranges[4].update_meta(2, 15)
broker.merge_shard_ranges(shard_ranges[4:])
with self._mock_sharder(conf=conf) as sharder:
self.assertTrue(sharder._cleave(broker))
expected = {'attempted': 1, 'success': 1, 'failure': 0,
'min_time': mock.ANY, 'max_time': mock.ANY,
'db_created': 1, 'db_exists': 0}
stats = self._assert_stats(expected, sharder, 'cleaved')
self.assertIsInstance(stats['min_time'], float)
self.assertIsInstance(stats['max_time'], float)
self.assertLessEqual(stats['min_time'], stats['max_time'])
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts().get(
'cleaved_db_created'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'cleaved_db_exists'))
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[4], 0)
shard_broker = ContainerBroker(expected_shard_dbs[4])
shard_own_sr = shard_broker.get_own_shard_range()
self.assertEqual(ShardRange.ACTIVE, shard_own_sr.state)
shard_info = shard_broker.get_info()
total_shard_stats['object_count'] += shard_info['object_count']
total_shard_stats['bytes_used'] += shard_info['bytes_used']
updated_shard_ranges = broker.get_shard_ranges()
self.assertEqual(5, len(updated_shard_ranges))
# NB stats of the ACTIVE shard range should not be reset by cleaving
for i in range(0, 4):
with annotate_failure(i):
self._check_shard_range(
shard_ranges[i], updated_shard_ranges[i])
self.assertEqual(dict(shard_ranges[4]), dict(updated_shard_ranges[4]))
# object copied to shard
self._check_objects(objects[8:], expected_shard_dbs[4])
# other shard ranges should be unchanged
for i, db in enumerate(expected_shard_dbs[:4]):
with annotate_failure(i):
self.assertFalse(os.path.exists(db))
self.assertEqual(initial_root_info['object_count'],
total_shard_stats['object_count'])
self.assertEqual(initial_root_info['bytes_used'],
total_shard_stats['bytes_used'])
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertTrue(context.cleaving_done)
self.assertTrue(context.done())
self.assertEqual('', context.cursor)
self.assertEqual(9, context.cleave_to_row)
self.assertEqual(9, context.max_row)
self.assertEqual(5, context.ranges_done)
self.assertEqual(0, context.ranges_todo)
with self._mock_sharder(conf=conf) as sharder:
self.assertTrue(sharder._cleave(broker))
sharder._replicate_object.assert_not_called()
self.assertTrue(broker.set_sharded_state())
# run cleave - should be a no-op
with self._mock_sharder(conf=conf) as sharder:
self.assertTrue(sharder._cleave(broker))
sharder._replicate_object.assert_not_called()
def test_cleave_root(self):
self._check_cleave_root()
def test_cleave_root_listing_limit_one(self):
        # force yield_objects to update its marker and call the broker's
        # get_objects() for each shard range, to check the marker moves on
self._check_cleave_root(conf={'cleave_row_batch_size': 1})
def test_cleave_root_ranges_change(self):
# verify that objects are not missed if shard ranges change between
# cleaving batches
broker = self._make_broker()
# this root db has very few object rows...
objects = [
('a', self.ts_encoded(), 10, 'text/plain', 'etag_a', 0, 0),
('b', self.ts_encoded(), 10, 'text/plain', 'etag_b', 0, 0),
('c', self.ts_encoded(), 1, 'text/plain', 'etag_c', 0, 0),
('d', self.ts_encoded(), 2, 'text/plain', 'etag_d', 0, 0),
('e', self.ts_encoded(), 3, 'text/plain', 'etag_e', 0, 0),
('f', self.ts_encoded(), 100, 'text/plain', 'etag_f', 0, 0),
('x', self.ts_encoded(), 0, '', '', 1, 0), # deleted
('z', self.ts_encoded(), 1000, 'text/plain', 'etag_z', 0, 0)
]
for obj in objects:
broker.put_object(*obj)
broker.enable_sharding(Timestamp.now())
shard_bounds = (('', 'd'), ('d', 'x'), ('x', ''))
        # shard ranges start life with an object count that is typically much
        # larger than this DB's object population...
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.CREATED, object_count=500000)
expected_shard_dbs = []
for shard_range in shard_ranges:
db_hash = hash_path(shard_range.account, shard_range.container)
expected_shard_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_hash + '.db'))
broker.merge_shard_ranges(shard_ranges[:3])
self.assertTrue(broker.set_sharding_state())
# run cleave - first batch is cleaved
with self._mock_sharder() as sharder:
self.assertFalse(sharder._cleave(broker))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertFalse(context.cleaving_done)
self.assertFalse(context.done())
self.assertEqual(shard_ranges[1].upper_str, context.cursor)
self.assertEqual(8, context.cleave_to_row)
self.assertEqual(8, context.max_row)
self.assertEqual(SHARDING, broker.get_db_state())
sharder._replicate_object.assert_has_calls(
[mock.call(0, db, 0) for db in expected_shard_dbs[:2]]
)
updated_shard_ranges = broker.get_shard_ranges()
self.assertEqual(3, len(updated_shard_ranges))
        # now that they have reached CLEAVED state, the first 2 shard ranges
        # should have updated object count, bytes used and meta_timestamp
shard_ranges[0].bytes_used = 23
shard_ranges[0].object_count = 4
shard_ranges[0].state = ShardRange.CLEAVED
self._check_shard_range(shard_ranges[0], updated_shard_ranges[0])
shard_ranges[1].bytes_used = 103
shard_ranges[1].object_count = 2
shard_ranges[1].state = ShardRange.CLEAVED
self._check_shard_range(shard_ranges[1], updated_shard_ranges[1])
self._check_objects(objects[:4], expected_shard_dbs[0])
self._check_objects(objects[4:7], expected_shard_dbs[1])
# the actual object counts were set in the new shard brokers' own_sr's
shard_broker = ContainerBroker(expected_shard_dbs[0])
self.assertEqual(4, shard_broker.get_own_shard_range().object_count)
shard_broker = ContainerBroker(expected_shard_dbs[1])
self.assertEqual(2, shard_broker.get_own_shard_range().object_count)
self.assertFalse(os.path.exists(expected_shard_dbs[2]))
# third shard range should be unchanged - not yet cleaved
self.assertEqual(dict(shard_ranges[2]),
dict(updated_shard_ranges[2]))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertFalse(context.cleaving_done)
self.assertFalse(context.done())
self.assertEqual(shard_ranges[1].upper_str, context.cursor)
self.assertEqual(8, context.cleave_to_row)
self.assertEqual(8, context.max_row)
        # now change the shard ranges so that the third consumes the second
shard_ranges[1].set_deleted()
shard_ranges[2].lower = 'd'
shard_ranges[2].timestamp = Timestamp.now()
broker.merge_shard_ranges(shard_ranges[1:3])
# run cleave - should process the extended third (final) range
with self._mock_sharder() as sharder:
self.assertTrue(sharder._cleave(broker))
self.assertEqual(SHARDING, broker.get_db_state())
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[2], 0)
updated_shard_ranges = broker.get_shard_ranges()
self.assertEqual(2, len(updated_shard_ranges))
self._check_shard_range(shard_ranges[0], updated_shard_ranges[0])
# third shard range should now have updated object count, bytes used,
# including objects previously in the second shard range
shard_ranges[2].bytes_used = 1103
shard_ranges[2].object_count = 3
shard_ranges[2].state = ShardRange.CLEAVED
self._check_shard_range(shard_ranges[2], updated_shard_ranges[1])
self._check_objects(objects[4:8], expected_shard_dbs[2])
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertTrue(context.cleaving_done)
self.assertTrue(context.done())
self.assertEqual(shard_ranges[2].upper_str, context.cursor)
self.assertEqual(8, context.cleave_to_row)
self.assertEqual(8, context.max_row)
def test_cleave_root_empty_db_with_ranges(self):
broker = self._make_broker()
broker.enable_sharding(Timestamp.now())
shard_bounds = (('', 'd'), ('d', 'x'), ('x', ''))
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.CREATED)
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.set_sharding_state())
sharder_conf = {'cleave_batch_size': 1}
with self._mock_sharder(sharder_conf) as sharder:
self.assertTrue(sharder._cleave(broker))
info_lines = sharder.logger.get_lines_for_level('info')
expected_zero_obj = [line for line in info_lines
if " - zero objects found" in line]
self.assertEqual(len(expected_zero_obj), len(shard_bounds))
cleaving_context = CleavingContext.load(broker)
        # even though there is a cleave_batch_size of 1, we don't count empty
        # ranges towards the batch when cleaving since they aren't replicated
self.assertEqual(cleaving_context.ranges_done, 3)
self.assertEqual(cleaving_context.ranges_todo, 0)
self.assertTrue(cleaving_context.cleaving_done)
self.assertEqual([ShardRange.CLEAVED] * 3,
[sr.state for sr in broker.get_shard_ranges()])
def test_cleave_root_empty_db_with_pre_existing_shard_db_handoff(self):
broker = self._make_broker()
broker.enable_sharding(Timestamp.now())
shard_bounds = (('', 'd'), ('d', 'x'), ('x', ''))
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.CREATED)
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.set_sharding_state())
sharder_conf = {'cleave_batch_size': 1}
with self._mock_sharder(sharder_conf) as sharder:
            # pre-create a shard broker on a handoff location. This forces
            # the sharder not to skip it but instead to replicate it, using
            # up one cleave_batch_size count.
sharder._get_shard_broker(shard_ranges[0], broker.root_path,
0)
self.assertFalse(sharder._cleave(broker))
info_lines = sharder.logger.get_lines_for_level('info')
expected_zero_obj = [line for line in info_lines
if " - zero objects found" in line]
self.assertEqual(len(expected_zero_obj), 1)
cleaving_context = CleavingContext.load(broker)
        # even though there is a cleave_batch_size of 1, we don't count empty
        # ranges towards the batch when cleaving since they aren't replicated
self.assertEqual(cleaving_context.ranges_done, 1)
self.assertEqual(cleaving_context.ranges_todo, 2)
self.assertFalse(cleaving_context.cleaving_done)
self.assertEqual(
[ShardRange.CLEAVED, ShardRange.CREATED, ShardRange.CREATED],
[sr.state for sr in broker.get_shard_ranges()])
def test_cleave_shard_range_no_own_shard_range(self):
# create an unsharded broker that has shard ranges but no
# own_shard_range, verify that it does not cleave...
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
shard_ranges = self._make_shard_ranges(
(('', 'middle'), ('middle', '')),
state=ShardRange.CLEAVED)
broker.merge_shard_ranges(shard_ranges)
obj = {'name': 'obj', 'created_at': next(self.ts_iter).internal,
'size': 14, 'content_type': 'text/plain', 'etag': 'an etag',
'deleted': 0}
broker.get_brokers()[0].merge_items([obj])
with self._mock_sharder() as sharder:
self.assertFalse(sharder._cleave(broker))
self.assertEqual(UNSHARDED, broker.get_db_state())
warning_lines = sharder.logger.get_lines_for_level('warning')
self.assertEqual(warning_lines[0],
'Failed to get own_shard_range, path: a/c, db: %s'
% broker.db_file)
sharder._replicate_object.assert_not_called()
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertFalse(context.cleaving_done)
# only the root broker on disk
suffix_dir = os.path.dirname(broker.db_dir)
self.assertEqual([os.path.basename(broker.db_dir)],
os.listdir(suffix_dir))
partition_dir = os.path.dirname(suffix_dir)
self.assertEqual([broker.db_dir[-3:]], os.listdir(partition_dir))
containers_dir = os.path.dirname(partition_dir)
self.assertEqual(['0'], os.listdir(containers_dir))
def test_cleave_shard(self):
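        # verify cleaving of a shard container (not a root): misplaced
        # objects are moved out and in-range objects are cleaved to the two
        # sub-shard ranges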
broker = self._make_broker(account='.shards_a', container='shard_c')
own_shard_range = ShardRange(
broker.path, Timestamp.now(), 'here', 'where',
state=ShardRange.SHARDING, epoch=Timestamp.now())
broker.merge_shard_ranges([own_shard_range])
broker.set_sharding_sysmeta('Root', 'a/c')
self.assertFalse(broker.is_root_container()) # sanity check
objects = [
('m', self.ts_encoded(), 1, 'text/plain', 'etag_m', 0, 0),
('n', self.ts_encoded(), 2, 'text/plain', 'etag_n', 0, 0),
('there', self.ts_encoded(), 3, 'text/plain', 'etag_there', 0, 0),
('where', self.ts_encoded(), 100, 'text/plain', 'etag_where', 0,
0),
]
misplaced_objects = [
('a', self.ts_encoded(), 1, 'text/plain', 'etag_a', 0, 0),
('z', self.ts_encoded(), 100, 'text/plain', 'etag_z', 1, 0),
]
for obj in objects + misplaced_objects:
broker.put_object(*obj)
shard_bounds = (('here', 'there'),
('there', 'where'))
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.CREATED)
expected_shard_dbs = []
for shard_range in shard_ranges:
db_hash = hash_path(shard_range.account, shard_range.container)
expected_shard_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_hash + '.db'))
misplaced_bounds = (('', 'here'),
('where', ''))
misplaced_ranges = self._make_shard_ranges(
misplaced_bounds, state=ShardRange.ACTIVE)
misplaced_dbs = []
for shard_range in misplaced_ranges:
db_hash = hash_path(shard_range.account, shard_range.container)
misplaced_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_hash + '.db'))
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.set_sharding_state())
# run cleave - first range is cleaved but move of misplaced objects is
# not successful
sharder_conf = {'cleave_batch_size': 1}
with self._mock_sharder(sharder_conf) as sharder:
with mock.patch.object(
sharder, '_make_shard_range_fetcher',
return_value=lambda: iter(misplaced_ranges)):
# cause misplaced objects replication to not succeed
quorum = quorum_size(sharder.ring.replica_count)
successes = [True] * (quorum - 1)
fails = [False] * (sharder.ring.replica_count - len(successes))
responses = successes + fails
random.shuffle(responses)
bad_result = (False, responses)
ok_result = (True, [True] * sharder.ring.replica_count)
sharder._replicate_object = mock.MagicMock(
# result for misplaced, misplaced, cleave
side_effect=(bad_result, ok_result, ok_result))
self.assertFalse(sharder._cleave(broker))
context = CleavingContext.load(broker)
self.assertFalse(context.misplaced_done)
self.assertFalse(context.cleaving_done)
self.assertEqual(shard_ranges[0].upper_str, context.cursor)
self.assertEqual(6, context.cleave_to_row)
self.assertEqual(6, context.max_row)
self.assertEqual(SHARDING, broker.get_db_state())
sharder._replicate_object.assert_has_calls(
[mock.call(0, misplaced_dbs[0], 0),
mock.call(0, misplaced_dbs[1], 0),
mock.call(0, expected_shard_dbs[0], 0)])
shard_broker = ContainerBroker(expected_shard_dbs[0])
        # NB when cleaving a shard, its own shard range state goes to
        # CLEAVED not ACTIVE
shard_own_sr = shard_broker.get_own_shard_range()
self.assertEqual(ShardRange.CLEAVED, shard_own_sr.state)
updated_shard_ranges = broker.get_shard_ranges()
self.assertEqual(2, len(updated_shard_ranges))
# first shard range should have updated object count, bytes used and
# meta_timestamp
shard_ranges[0].bytes_used = 6
shard_ranges[0].object_count = 3
shard_ranges[0].state = ShardRange.CLEAVED
self._check_shard_range(shard_ranges[0], updated_shard_ranges[0])
self._check_objects(objects[:3], expected_shard_dbs[0])
self.assertFalse(os.path.exists(expected_shard_dbs[1]))
self._check_objects(misplaced_objects[:1], misplaced_dbs[0])
self._check_objects(misplaced_objects[1:], misplaced_dbs[1])
unlink_files(expected_shard_dbs)
unlink_files(misplaced_dbs)
# run cleave - second (final) range is cleaved; move this range to
# CLEAVED state and update stats to simulate another replica having
# cleaved it and replicated its state
shard_ranges[1].update_state(ShardRange.CLEAVED)
shard_ranges[1].update_meta(2, 15)
broker.merge_shard_ranges(shard_ranges[1:2])
with self._mock_sharder(sharder_conf) as sharder:
with mock.patch.object(
sharder, '_make_shard_range_fetcher',
return_value=lambda: iter(misplaced_ranges)):
self.assertTrue(sharder._cleave(broker))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertTrue(context.cleaving_done)
self.assertEqual(shard_ranges[1].upper_str, context.cursor)
self.assertEqual(6, context.cleave_to_row)
self.assertEqual(6, context.max_row)
self.assertEqual(SHARDING, broker.get_db_state())
sharder._replicate_object.assert_has_calls(
[mock.call(0, misplaced_dbs[0], 0),
mock.call(0, expected_shard_dbs[1], 0)])
shard_broker = ContainerBroker(expected_shard_dbs[1])
shard_own_sr = shard_broker.get_own_shard_range()
self.assertEqual(ShardRange.CLEAVED, shard_own_sr.state)
updated_shard_ranges = broker.get_shard_ranges()
self.assertEqual(2, len(updated_shard_ranges))
# second shard range should have updated object count, bytes used and
# meta_timestamp
self.assertEqual(dict(shard_ranges[1]), dict(updated_shard_ranges[1]))
self._check_objects(objects[3:], expected_shard_dbs[1])
self.assertFalse(os.path.exists(expected_shard_dbs[0]))
self._check_objects(misplaced_objects[:1], misplaced_dbs[0])
self.assertFalse(os.path.exists(misplaced_dbs[1]))
def test_cleave_shard_shrinking(self):
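        # verify cleaving of a shrinking shard into various acceptor
        # configurations, including acceptors that only partially cover the
        # shrinking shard's namespace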
unique = [0]
def do_test(acceptor_state, acceptor_bounds, expect_delete,
exp_progress_bounds=None):
# 'unique' ensures fresh dbs on each test iteration
unique[0] += 1
objects = [
('i', self.ts_encoded(), 3, 'text/plain', 'etag_t', 0, 0),
('m', self.ts_encoded(), 33, 'text/plain', 'etag_m', 0, 0),
('w', self.ts_encoded(), 100, 'text/plain', 'etag_w', 0, 0),
]
broker = self._make_shrinking_broker(
container='donor_%s' % unique[0], lower='h', upper='w',
objects=objects)
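            # the donor broker's namespace is (h, w], so all three objects
            # above fall within it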
acceptor_epoch = next(self.ts_iter)
acceptors = [
ShardRange('.shards_a/acceptor_%s_%s' % (unique[0], bounds[1]),
Timestamp.now(), bounds[0], bounds[1],
'1000', '11111',
state=acceptor_state, epoch=acceptor_epoch)
for bounds in acceptor_bounds]
# by default expect cleaving to progress through all acceptors
if exp_progress_bounds is None:
exp_progress_acceptors = acceptors
else:
exp_progress_acceptors = [
ShardRange(
'.shards_a/acceptor_%s_%s' % (unique[0], bounds[1]),
Timestamp.now(), bounds[0], bounds[1], '1000', '11111',
state=acceptor_state, epoch=acceptor_epoch)
for bounds in exp_progress_bounds]
expected_acceptor_dbs = []
for acceptor in exp_progress_acceptors:
db_hash = hash_path(acceptor.account,
acceptor.container)
# NB expected cleaved db name includes acceptor epoch
db_name = '%s_%s.db' % (db_hash, acceptor_epoch.internal)
expected_acceptor_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_name))
broker.merge_shard_ranges(acceptors)
# run cleave
with mock_timestamp_now_with_iter(self.ts_iter):
with self._mock_sharder() as sharder:
sharder.cleave_batch_size = 3
self.assertEqual(expect_delete, sharder._cleave(broker))
# check the cleave context and source broker
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertEqual(expect_delete, context.cleaving_done)
own_sr = broker.get_own_shard_range()
if exp_progress_acceptors:
expected_cursor = exp_progress_acceptors[-1].upper_str
else:
expected_cursor = own_sr.lower_str
self.assertEqual(expected_cursor, context.cursor)
self.assertEqual(3, context.cleave_to_row)
self.assertEqual(3, context.max_row)
self.assertEqual(SHARDING, broker.get_db_state())
if expect_delete and len(acceptor_bounds) == 1:
self.assertTrue(own_sr.deleted)
self.assertEqual(ShardRange.SHRUNK, own_sr.state)
else:
self.assertFalse(own_sr.deleted)
self.assertEqual(ShardRange.SHRINKING, own_sr.state)
# check the acceptor db's
sharder._replicate_object.assert_has_calls(
[mock.call(0, acceptor_db, 0)
for acceptor_db in expected_acceptor_dbs])
for acceptor_db in expected_acceptor_dbs:
self.assertTrue(os.path.exists(acceptor_db))
                # NB when *shrinking* a shard container, expect the acceptor
                # broker's own shard range state to remain in the original
                # state of the acceptor shard range rather than being set to
                # CLEAVED as it would be when *sharding*.
acceptor_broker = ContainerBroker(acceptor_db)
self.assertEqual(acceptor_state,
acceptor_broker.get_own_shard_range().state)
acceptor_ranges = acceptor_broker.get_shard_ranges(
include_deleted=True)
if expect_delete and len(acceptor_bounds) == 1:
# special case when deleted shrinking shard range is
# forwarded to single enclosing acceptor
self.assertEqual([own_sr], acceptor_ranges)
self.assertTrue(acceptor_ranges[0].deleted)
self.assertEqual(ShardRange.SHRUNK,
acceptor_ranges[0].state)
else:
self.assertEqual([], acceptor_ranges)
expected_objects = [
obj for obj in objects
if any(acceptor.lower < obj[0] <= acceptor.upper
for acceptor in exp_progress_acceptors)
]
self._check_objects(expected_objects, expected_acceptor_dbs)
# check that *shrinking* shard's copies of acceptor ranges are not
# updated as they would be if *sharding*
updated_shard_ranges = broker.get_shard_ranges()
self.assertEqual([dict(sr) for sr in acceptors],
[dict(sr) for sr in updated_shard_ranges])
# check that *shrinking* shard's copies of acceptor ranges are not
# updated when completing sharding as they would be if *sharding*
with mock_timestamp_now_with_iter(self.ts_iter):
sharder._complete_sharding(broker)
updated_shard_ranges = broker.get_shard_ranges()
self.assertEqual([dict(sr) for sr in acceptors],
[dict(sr) for sr in updated_shard_ranges])
own_sr = broker.get_own_shard_range()
self.assertEqual(expect_delete, own_sr.deleted)
if expect_delete:
self.assertEqual(ShardRange.SHRUNK, own_sr.state)
else:
self.assertEqual(ShardRange.SHRINKING, own_sr.state)
# note: shrinking shard bounds are (h, w)
# shrinking to a single acceptor with enclosing namespace
expect_delete = True
do_test(ShardRange.CREATED, (('h', ''),), expect_delete)
do_test(ShardRange.CLEAVED, (('h', ''),), expect_delete)
do_test(ShardRange.ACTIVE, (('h', ''),), expect_delete)
# shrinking to multiple acceptors that enclose namespace
do_test(ShardRange.CREATED, (('d', 'k'), ('k', '')), expect_delete)
do_test(ShardRange.CLEAVED, (('d', 'k'), ('k', '')), expect_delete)
do_test(ShardRange.ACTIVE, (('d', 'k'), ('k', '')), expect_delete)
do_test(ShardRange.CLEAVED, (('d', 'k'), ('k', 't'), ('t', '')),
expect_delete)
do_test(ShardRange.CREATED, (('d', 'k'), ('k', 't'), ('t', '')),
expect_delete)
do_test(ShardRange.ACTIVE, (('d', 'k'), ('k', 't'), ('t', '')),
expect_delete)
# shrinking to incomplete acceptors, gap at end of namespace
expect_delete = False
do_test(ShardRange.CREATED, (('d', 'k'),), expect_delete)
do_test(ShardRange.CLEAVED, (('d', 'k'), ('k', 't')), expect_delete)
# shrinking to incomplete acceptors, gap at start and end of namespace
do_test(ShardRange.CREATED, (('k', 't'),), expect_delete,
exp_progress_bounds=())
# shrinking to incomplete acceptors, gap at start of namespace
do_test(ShardRange.CLEAVED, (('k', 't'), ('t', '')), expect_delete,
exp_progress_bounds=())
# shrinking to incomplete acceptors, gap in middle - some progress
do_test(ShardRange.CLEAVED, (('d', 'k'), ('t', '')), expect_delete,
exp_progress_bounds=(('d', 'k'),))
def test_cleave_repeated(self):
        # verify that if new objects are merged into the retiring db after
        # cleaving has started then cleaving will repeat, but only the new
        # objects are cleaved in the repeated cleaving pass
broker = self._make_broker()
objects = [
('obj%03d' % i, next(self.ts_iter), 1, 'text/plain', 'etag', 0, 0)
for i in range(10)
]
new_objects = [
(name, next(self.ts_iter), 1, 'text/plain', 'etag', 0, 0)
for name in ('alpha', 'zeta')
]
for obj in objects:
broker.put_object(*obj)
broker._commit_puts()
broker.enable_sharding(Timestamp.now())
shard_bounds = (('', 'obj004'), ('obj004', ''))
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.CREATED)
expected_shard_dbs = []
for shard_range in shard_ranges:
db_hash = hash_path(shard_range.account, shard_range.container)
expected_shard_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_hash + '.db'))
broker.merge_shard_ranges(shard_ranges)
old_broker = broker.get_brokers()[0]
node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda5', 'id': '2',
'index': 0}
calls = []
key = ('name', 'created_at', 'size', 'content_type', 'etag', 'deleted')
def mock_replicate_object(part, db, node_id):
# merge new objects between cleave of first and second shard ranges
if not calls:
old_broker.merge_items(
[dict(zip(key, obj)) for obj in new_objects])
calls.append((part, db, node_id))
return True, [True, True, True]
with self._mock_sharder() as sharder:
sharder._audit_container = mock.MagicMock()
sharder._replicate_object = mock_replicate_object
sharder._process_broker(broker, node, 99)
        # sanity check - the new objects were merged into the old db
self.assertFalse(broker.get_objects())
self.assertEqual(12, len(old_broker.get_objects()))
self.assertEqual(SHARDING, broker.get_db_state())
self.assertEqual(ShardRange.SHARDING,
broker.get_own_shard_range().state)
self.assertEqual([(0, expected_shard_dbs[0], 0),
(0, expected_shard_dbs[1], 0)], calls)
# check shard ranges were updated to CLEAVED
updated_shard_ranges = broker.get_shard_ranges()
        # 'alpha' was not in the table when the first shard was cleaved
shard_ranges[0].bytes_used = 5
shard_ranges[0].object_count = 5
shard_ranges[0].state = ShardRange.CLEAVED
self._check_shard_range(shard_ranges[0], updated_shard_ranges[0])
self._check_objects(objects[:5], expected_shard_dbs[0])
        # 'zeta' was in the table when the second shard was cleaved
shard_ranges[1].bytes_used = 6
shard_ranges[1].object_count = 6
shard_ranges[1].state = ShardRange.CLEAVED
self._check_shard_range(shard_ranges[1], updated_shard_ranges[1])
self._check_objects(objects[5:] + new_objects[1:],
expected_shard_dbs[1])
context = CleavingContext.load(broker)
self.assertFalse(context.misplaced_done)
self.assertFalse(context.cleaving_done)
self.assertFalse(context.done())
self.assertEqual('', context.cursor)
self.assertEqual(10, context.cleave_to_row)
self.assertEqual(12, context.max_row) # note that max row increased
self.assertTrue(self.logger.statsd_client.calls['timing_since'])
self.assertEqual(
'sharder.sharding.move_misplaced',
self.logger.statsd_client.calls['timing_since'][-3][0][0])
self.assertGreater(
self.logger.statsd_client.calls['timing_since'][-3][0][1], 0)
self.assertEqual(
'sharder.sharding.set_state',
self.logger.statsd_client.calls['timing_since'][-2][0][0])
self.assertGreater(
self.logger.statsd_client.calls['timing_since'][-2][0][1], 0)
self.assertEqual(
'sharder.sharding.cleave',
self.logger.statsd_client.calls['timing_since'][-1][0][0])
self.assertGreater(
self.logger.statsd_client.calls['timing_since'][-1][0][1], 0)
lines = sharder.logger.get_lines_for_level('info')
self.assertEqual(
["Kick off container cleaving, own shard range in state "
"'sharding', path: a/c, db: %s" % broker.db_file,
"Starting to cleave (2 todo), path: a/c, db: %s"
% broker.db_file], lines[:2])
self.assertIn('Completed cleaving, DB remaining in sharding state, '
'path: a/c, db: %s'
% broker.db_file, lines[1:])
lines = sharder.logger.get_lines_for_level('warning')
self.assertIn('Repeat cleaving required', lines[0])
self.assertFalse(lines[1:])
unlink_files(expected_shard_dbs)
# repeat the cleaving - the newer objects get cleaved
with self._mock_sharder() as sharder:
sharder._audit_container = mock.MagicMock()
sharder._process_broker(broker, node, 99)
# this time the sharding completed
self.assertEqual(SHARDED, broker.get_db_state())
self.assertEqual(ShardRange.SHARDED,
broker.get_own_shard_range().state)
sharder._replicate_object.assert_has_calls(
[mock.call(0, expected_shard_dbs[0], 0),
mock.call(0, expected_shard_dbs[1], 0)])
# shard ranges are now ACTIVE - stats not updated by cleaving
updated_shard_ranges = broker.get_shard_ranges()
shard_ranges[0].state = ShardRange.ACTIVE
self._check_shard_range(shard_ranges[0], updated_shard_ranges[0])
self._check_objects(new_objects[:1], expected_shard_dbs[0])
# both new objects are included in repeat cleaving but no older objects
shard_ranges[1].state = ShardRange.ACTIVE
self._check_shard_range(shard_ranges[1], updated_shard_ranges[1])
self._check_objects(new_objects[1:], expected_shard_dbs[1])
lines = sharder.logger.get_lines_for_level('info')
self.assertEqual(
'Starting to cleave (2 todo), path: a/c, db: %s'
% broker.db_file, lines[0])
self.assertIn(
'Completed cleaving, DB set to sharded state, path: a/c, db: %s'
% broker.db_file, lines[1:])
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertTrue(self.logger.statsd_client.calls['timing_since'])
self.assertEqual(
'sharder.sharding.move_misplaced',
self.logger.statsd_client.calls['timing_since'][-4][0][0])
self.assertGreater(
self.logger.statsd_client.calls['timing_since'][-4][0][1], 0)
self.assertEqual(
'sharder.sharding.cleave',
self.logger.statsd_client.calls['timing_since'][-3][0][0])
self.assertGreater(
self.logger.statsd_client.calls['timing_since'][-3][0][1], 0)
self.assertEqual(
'sharder.sharding.completed',
self.logger.statsd_client.calls['timing_since'][-2][0][0])
self.assertGreater(
self.logger.statsd_client.calls['timing_since'][-2][0][1], 0)
self.assertEqual(
'sharder.sharding.send_sr',
self.logger.statsd_client.calls['timing_since'][-1][0][0])
self.assertGreater(
self.logger.statsd_client.calls['timing_since'][-1][0][1], 0)
def test_cleave_timing_metrics(self):
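        # verify that sharding timing metrics (move_misplaced, cleave,
        # completed, send_sr) are emitted as the broker is cleaved through
        # to the sharded state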
broker = self._make_broker()
objects = [{'name': 'obj_%03d' % i,
'created_at': Timestamp.now().normal,
'content_type': 'text/plain',
'etag': 'etag_%d' % i,
'size': 1024 * i,
'deleted': i % 2,
'storage_policy_index': 0,
} for i in range(1, 8)]
broker.merge_items([dict(obj) for obj in objects])
broker.enable_sharding(Timestamp.now())
shard_ranges = self._make_shard_ranges(
(('', 'obj_004'), ('obj_004', '')), state=ShardRange.CREATED)
expected_shard_dbs = []
for shard_range in shard_ranges:
db_hash = hash_path(shard_range.account, shard_range.container)
expected_shard_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_hash + '.db'))
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.set_sharding_state())
node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda5', 'id': '2',
'index': 0}
with self._mock_sharder() as sharder:
sharder._audit_container = mock.MagicMock()
sharder._process_broker(broker, node, 99)
lines = sharder.logger.get_lines_for_level('info')
self.assertEqual(
'Starting to cleave (2 todo), path: a/c, db: %s'
% broker.db_file, lines[0])
self.assertIn(
'Completed cleaving, DB set to sharded state, path: a/c, db: %s'
% broker.db_file, lines[1:])
self.assertTrue(self.logger.statsd_client.calls['timing_since'])
self.assertEqual(
'sharder.sharding.move_misplaced',
self.logger.statsd_client.calls['timing_since'][-4][0][0])
self.assertGreater(
self.logger.statsd_client.calls['timing_since'][-4][0][1], 0)
self.assertEqual(
'sharder.sharding.cleave',
self.logger.statsd_client.calls['timing_since'][-3][0][0])
self.assertGreater(
self.logger.statsd_client.calls['timing_since'][-3][0][1], 0)
self.assertEqual(
'sharder.sharding.completed',
self.logger.statsd_client.calls['timing_since'][-2][0][0])
self.assertGreater(
self.logger.statsd_client.calls['timing_since'][-2][0][1], 0)
self.assertEqual(
'sharder.sharding.send_sr',
self.logger.statsd_client.calls['timing_since'][-1][0][0])
self.assertGreater(
self.logger.statsd_client.calls['timing_since'][-1][0][1], 0)
# check shard ranges were updated to ACTIVE
self.assertEqual([ShardRange.ACTIVE] * 2,
[sr.state for sr in broker.get_shard_ranges()])
shard_broker = ContainerBroker(expected_shard_dbs[0])
actual_objects = shard_broker.get_objects()
self.assertEqual(objects[:4], actual_objects)
shard_broker = ContainerBroker(expected_shard_dbs[1])
actual_objects = shard_broker.get_objects()
self.assertEqual(objects[4:], actual_objects)
def test_cleave_multiple_storage_policies(self):
# verify that objects in all storage policies are cleaved
broker = self._make_broker()
# add objects in multiple policies
objects = [{'name': 'obj_%03d' % i,
'created_at': Timestamp.now().normal,
'content_type': 'text/plain',
'etag': 'etag_%d' % i,
'size': 1024 * i,
'deleted': i % 2,
'storage_policy_index': i % 2,
} for i in range(1, 8)]
# merge_items mutates items
broker.merge_items([dict(obj) for obj in objects])
broker.enable_sharding(Timestamp.now())
shard_ranges = self._make_shard_ranges(
(('', 'obj_004'), ('obj_004', '')), state=ShardRange.CREATED)
expected_shard_dbs = []
for shard_range in shard_ranges:
db_hash = hash_path(shard_range.account, shard_range.container)
expected_shard_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_hash + '.db'))
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.set_sharding_state())
node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda5', 'id': '2',
'index': 0}
with self._mock_sharder() as sharder:
sharder._audit_container = mock.MagicMock()
sharder._process_broker(broker, node, 99)
# check shard ranges were updated to ACTIVE
self.assertEqual([ShardRange.ACTIVE] * 2,
[sr.state for sr in broker.get_shard_ranges()])
shard_broker = ContainerBroker(expected_shard_dbs[0])
actual_objects = shard_broker.get_objects()
self.assertEqual(objects[:4], actual_objects)
shard_broker = ContainerBroker(expected_shard_dbs[1])
actual_objects = shard_broker.get_objects()
self.assertEqual(objects[4:], actual_objects)
def test_cleave_insufficient_replication(self):
# verify that if replication of a cleaved shard range fails then rows
# are not merged again to the existing shard db
broker = self._make_broker()
retiring_db_id = broker.get_info()['id']
objects = [
{'name': 'obj%03d' % i, 'created_at': next(self.ts_iter),
'size': 1, 'content_type': 'text/plain', 'etag': 'etag',
'deleted': 0, 'storage_policy_index': 0}
for i in range(10)
]
broker.merge_items([dict(obj) for obj in objects])
broker._commit_puts()
broker.enable_sharding(Timestamp.now())
shard_bounds = (('', 'obj004'), ('obj004', ''))
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.CREATED)
expected_shard_dbs = []
for shard_range in shard_ranges:
db_hash = hash_path(shard_range.account, shard_range.container)
expected_shard_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_hash + '.db'))
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.set_sharding_state())
new_object = {'name': 'alpha', 'created_at': next(self.ts_iter),
'size': 0, 'content_type': 'text/plain', 'etag': 'etag',
'deleted': 0, 'storage_policy_index': 0}
broker.merge_items([dict(new_object)])
node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda5', 'id': '2',
'index': 0}
orig_merge_items = ContainerBroker.merge_items
def mock_merge_items(broker, items):
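# capture merge_items calls so the rows merged to each shard db can be checked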
merge_items_calls.append((broker.path,
# merge mutates item so make a copy
[dict(item) for item in items]))
orig_merge_items(broker, items)
# first shard range cleaved but fails to replicate
merge_items_calls = []
with mock.patch('swift.container.backend.ContainerBroker.merge_items',
mock_merge_items):
with self._mock_sharder() as sharder:
sharder._replicate_object = mock.MagicMock(
return_value=(False, [False, False, True]))
sharder._audit_container = mock.MagicMock()
sharder._process_broker(broker, node, 99)
self.assertEqual(SHARDING, broker.get_db_state())
self.assertEqual(ShardRange.SHARDING,
broker.get_own_shard_range().state)
self._assert_shard_ranges_equal(shard_ranges,
broker.get_shard_ranges())
# first shard range cleaved to shard broker
self.assertEqual([(shard_ranges[0].name, objects[:5])],
merge_items_calls)
# replication of first shard range fails - no more shards attempted
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[0], 0)
# shard broker has sync points
shard_broker = ContainerBroker(expected_shard_dbs[0])
self.assertEqual(
[{'remote_id': retiring_db_id, 'sync_point': len(objects)}],
shard_broker.get_syncs())
self.assertEqual(objects[:5], shard_broker.get_objects())
# first shard range replicates ok, no new merges required, second is
# cleaved but fails to replicate
merge_items_calls = []
with mock.patch('swift.container.backend.ContainerBroker.merge_items',
mock_merge_items), self._mock_sharder() as sharder:
sharder._replicate_object = mock.MagicMock(
side_effect=[(False, [False, True, True]),
(False, [False, False, True])])
sharder._audit_container = mock.MagicMock()
sharder._process_broker(broker, node, 99)
self.assertEqual(SHARDING, broker.get_db_state())
self.assertEqual(ShardRange.SHARDING,
broker.get_own_shard_range().state)
broker_shard_ranges = broker.get_shard_ranges()
shard_ranges[0].object_count = 5
shard_ranges[0].bytes_used = sum(obj['size'] for obj in objects[:5])
shard_ranges[0].state = ShardRange.CLEAVED
self._check_shard_range(shard_ranges[0], broker_shard_ranges[0])
# second shard range still in created state
self._assert_shard_ranges_equal([shard_ranges[1]],
[broker_shard_ranges[1]])
# only second shard range rows were merged to shard db
self.assertEqual([(shard_ranges[1].name, objects[5:])],
merge_items_calls)
sharder._replicate_object.assert_has_calls(
[mock.call(0, expected_shard_dbs[0], 0),
mock.call(0, expected_shard_dbs[1], 0)])
# shard broker has sync points
shard_broker = ContainerBroker(expected_shard_dbs[1])
self.assertEqual(
[{'remote_id': retiring_db_id, 'sync_point': len(objects)}],
shard_broker.get_syncs())
self.assertEqual(objects[5:], shard_broker.get_objects())
# repeat - second shard range cleaves fully because its previously
# cleaved shard db no longer exists
unlink_files(expected_shard_dbs)
merge_items_calls = []
with mock.patch('swift.container.backend.ContainerBroker.merge_items',
mock_merge_items):
with self._mock_sharder() as sharder:
sharder._replicate_object = mock.MagicMock(
side_effect=[(True, [True, True, True]), # misplaced obj
(False, [False, True, True])])
sharder._audit_container = mock.MagicMock()
sharder._process_broker(broker, node, 99)
self.assertEqual(SHARDED, broker.get_db_state())
self.assertEqual(ShardRange.SHARDED,
broker.get_own_shard_range().state)
broker_shard_ranges = broker.get_shard_ranges()
shard_ranges[1].object_count = 5
shard_ranges[1].bytes_used = sum(obj['size'] for obj in objects[5:])
shard_ranges[1].state = ShardRange.ACTIVE
self._check_shard_range(shard_ranges[1], broker_shard_ranges[1])
# second shard range rows were merged to shard db again
self.assertEqual([(shard_ranges[0].name, [new_object]),
(shard_ranges[1].name, objects[5:])],
merge_items_calls)
sharder._replicate_object.assert_has_calls(
[mock.call(0, expected_shard_dbs[0], 0),
mock.call(0, expected_shard_dbs[1], 0)])
# first shard broker was created by misplaced object - no sync point
shard_broker = ContainerBroker(expected_shard_dbs[0])
self.assertFalse(shard_broker.get_syncs())
self.assertEqual([new_object], shard_broker.get_objects())
# second shard broker has sync points
shard_broker = ContainerBroker(expected_shard_dbs[1])
self.assertEqual(
[{'remote_id': retiring_db_id, 'sync_point': len(objects)}],
shard_broker.get_syncs())
self.assertEqual(objects[5:], shard_broker.get_objects())
def test_shard_replication_quorum_failures(self):
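# verify that cleaving only moves on to the next shard range when the
# configured shard_replication_quorum (or existing_shard_replication_quorum)
# is met, and that quorums greater than the replica count are capped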
broker = self._make_broker()
objects = [
{'name': 'obj%03d' % i, 'created_at': next(self.ts_iter),
'size': 1, 'content_type': 'text/plain', 'etag': 'etag',
'deleted': 0, 'storage_policy_index': 0}
for i in range(10)
]
broker.merge_items([dict(obj) for obj in objects])
broker._commit_puts()
shard_bounds = (('', 'obj002'), ('obj002', 'obj004'),
('obj004', 'obj006'), ('obj006', ''))
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.CREATED)
expected_shard_dbs = []
for shard_range in shard_ranges:
db_hash = hash_path(shard_range.account, shard_range.container)
expected_shard_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_hash + '.db'))
broker.enable_sharding(Timestamp.now())
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.set_sharding_state())
node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda5', 'id': '2',
'index': 0}
with self._mock_sharder({'shard_replication_quorum': 3}) as sharder:
sharder._replicate_object = mock.MagicMock(
side_effect=[(False, [False, True, True]),
(False, [False, False, True])])
sharder._audit_container = mock.MagicMock()
sharder._process_broker(broker, node, 99)
# replication of first shard range fails - no more shards attempted
self.assertEqual(SHARDING, broker.get_db_state())
self.assertEqual(ShardRange.SHARDING,
broker.get_own_shard_range().state)
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[0], 0)
self.assertEqual([ShardRange.CREATED] * 4,
[sr.state for sr in broker.get_shard_ranges()])
# and again with a chilled out quorum, so cleaving moves on to the second
# shard range which fails to reach even chilled quorum
with self._mock_sharder({'shard_replication_quorum': 1}) as sharder:
sharder._replicate_object = mock.MagicMock(
side_effect=[(False, [False, False, True]),
(False, [False, False, False])])
sharder._audit_container = mock.MagicMock()
sharder._process_broker(broker, node, 99)
self.assertEqual(SHARDING, broker.get_db_state())
self.assertEqual(ShardRange.SHARDING,
broker.get_own_shard_range().state)
self.assertEqual(sharder._replicate_object.call_args_list, [
mock.call(0, expected_shard_dbs[0], 0),
mock.call(0, expected_shard_dbs[1], 0),
])
self.assertEqual(
[ShardRange.CLEAVED, ShardRange.CREATED, ShardRange.CREATED,
ShardRange.CREATED],
[sr.state for sr in broker.get_shard_ranges()])
# now pretend another node successfully cleaved the second shard range,
# but this node still fails to replicate so still cannot move on
shard_ranges[1].update_state(ShardRange.CLEAVED)
broker.merge_shard_ranges(shard_ranges[1])
with self._mock_sharder({'shard_replication_quorum': 1}) as sharder:
sharder._replicate_object = mock.MagicMock(
side_effect=[(False, [False, False, False])])
sharder._audit_container = mock.MagicMock()
sharder._process_broker(broker, node, 99)
self.assertEqual(SHARDING, broker.get_db_state())
self.assertEqual(ShardRange.SHARDING,
broker.get_own_shard_range().state)
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[1], 0)
self.assertEqual(
[ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED,
ShardRange.CREATED],
[sr.state for sr in broker.get_shard_ranges()])
# until a super-chilled quorum is used - but even then there must have
# been an attempt to replicate
with self._mock_sharder(
{'shard_replication_quorum': 1,
'existing_shard_replication_quorum': 0}) as sharder:
sharder._replicate_object = mock.MagicMock(
side_effect=[(False, [])]) # maybe shard db was deleted
sharder._audit_container = mock.MagicMock()
sharder._process_broker(broker, node, 99)
self.assertEqual(SHARDING, broker.get_db_state())
self.assertEqual(ShardRange.SHARDING,
broker.get_own_shard_range().state)
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[1], 0)
self.assertEqual(
[ShardRange.CLEAVED, ShardRange.CLEAVED, ShardRange.CREATED,
ShardRange.CREATED],
[sr.state for sr in broker.get_shard_ranges()])
# next pass - the second shard replication is attempted and fails, but
# that's ok because another node has cleaved it and
# existing_shard_replication_quorum is zero
with self._mock_sharder(
{'shard_replication_quorum': 1,
'existing_shard_replication_quorum': 0}) as sharder:
sharder._replicate_object = mock.MagicMock(
side_effect=[(False, [False, False, False]),
(False, [False, True, False])])
sharder._audit_container = mock.MagicMock()
sharder._process_broker(broker, node, 99)
self.assertEqual(SHARDING, broker.get_db_state())
self.assertEqual(ShardRange.SHARDING,
broker.get_own_shard_range().state)
self.assertEqual(sharder._replicate_object.call_args_list, [
mock.call(0, expected_shard_dbs[1], 0),
mock.call(0, expected_shard_dbs[2], 0),
])
self.assertEqual([ShardRange.CLEAVED] * 3 + [ShardRange.CREATED],
[sr.state for sr in broker.get_shard_ranges()])
self.assertEqual(1, sharder.shard_replication_quorum)
self.assertEqual(0, sharder.existing_shard_replication_quorum)
# crazy replication quorums will be capped to replica_count
with self._mock_sharder(
{'shard_replication_quorum': 99,
'existing_shard_replication_quorum': 99}) as sharder:
sharder._replicate_object = mock.MagicMock(
side_effect=[(False, [False, True, True])])
sharder._audit_container = mock.MagicMock()
sharder._process_broker(broker, node, 99)
self.assertEqual(SHARDING, broker.get_db_state())
self.assertEqual(ShardRange.SHARDING,
broker.get_own_shard_range().state)
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[3], 0)
self.assertEqual([ShardRange.CLEAVED] * 3 + [ShardRange.CREATED],
[sr.state for sr in broker.get_shard_ranges()])
self.assertEqual(3, sharder.shard_replication_quorum)
self.assertEqual(3, sharder.existing_shard_replication_quorum)
# ...and progress is still made if replication fully succeeds
with self._mock_sharder(
{'shard_replication_quorum': 99,
'existing_shard_replication_quorum': 99}) as sharder:
sharder._replicate_object = mock.MagicMock(
side_effect=[(True, [True, True, True])])
sharder._audit_container = mock.MagicMock()
sharder._process_broker(broker, node, 99)
self.assertEqual(SHARDED, broker.get_db_state())
self.assertEqual(ShardRange.SHARDED,
broker.get_own_shard_range().state)
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[3], 0)
self.assertEqual([ShardRange.ACTIVE] * 4,
[sr.state for sr in broker.get_shard_ranges()])
warnings = sharder.logger.get_lines_for_level('warning')
self.assertIn(
'shard_replication_quorum of 99 exceeds replica count',
warnings[0])
self.assertIn(
'existing_shard_replication_quorum of 99 exceeds replica count',
warnings[1])
self.assertEqual(3, sharder.shard_replication_quorum)
self.assertEqual(3, sharder.existing_shard_replication_quorum)
def test_cleave_to_existing_shard_db(self):
# verify that when cleaving to an already existing shard db, sync points
# ensure that only new rows are merged to the shard db
def replicate(node, from_broker, part):
# short circuit replication
rpc = replicator.ContainerReplicatorRpc(
self.tempdir, DATADIR, ContainerBroker, mount_check=False)
fake_repl_connection = attach_fake_replication_rpc(rpc)
with mock.patch('swift.common.db_replicator.ReplConnection',
fake_repl_connection):
with mock.patch('swift.common.db_replicator.ring.Ring',
lambda *args, **kwargs: FakeRing()):
daemon = replicator.ContainerReplicator({})
info = from_broker.get_replication_info()
success = daemon._repl_to_node(
node, from_broker, part, info)
self.assertTrue(success)
orig_merge_items = ContainerBroker.merge_items
def mock_merge_items(broker, items):
# capture merge_items calls
merge_items_calls.append((broker.path,
# merge mutates item so make a copy
[dict(item) for item in items]))
orig_merge_items(broker, items)
objects = [
{'name': 'obj%03d' % i, 'created_at': next(self.ts_iter),
'size': 1, 'content_type': 'text/plain', 'etag': 'etag',
'deleted': 0, 'storage_policy_index': 0}
for i in range(10)
]
# local db gets 4 objects
local_broker = self._make_broker()
local_broker.merge_items([dict(obj) for obj in objects[2:6]])
local_broker._commit_puts()
local_retiring_db_id = local_broker.get_info()['id']
# remote db gets 5 objects
remote_broker = self._make_broker(device='sdb')
remote_broker.merge_items([dict(obj) for obj in objects[2:7]])
remote_broker._commit_puts()
remote_retiring_db_id = remote_broker.get_info()['id']
local_node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda',
'id': '2', 'index': 0, 'replication_ip': '1.2.3.4',
'replication_port': 6040}
remote_node = {'ip': '1.2.3.5', 'port': 6040, 'device': 'sdb',
'id': '3', 'index': 1, 'replication_ip': '1.2.3.5',
'replication_port': 6040}
# remote db replicates to local, bringing local db's total to 5 objects
self.assertNotEqual(local_broker.get_objects(),
remote_broker.get_objects())
replicate(local_node, remote_broker, 0)
self.assertEqual(local_broker.get_objects(),
remote_broker.get_objects())
# local db gets 2 new objects, bringing its total to 7
local_broker.merge_items([dict(obj) for obj in objects[1:2]])
local_broker.merge_items([dict(obj) for obj in objects[7:8]])
# local db gets shard ranges
own_shard_range = local_broker.get_own_shard_range()
now = Timestamp.now()
own_shard_range.update_state(ShardRange.SHARDING, state_timestamp=now)
own_shard_range.epoch = now
shard_ranges = self._make_shard_ranges(
(('', 'obj004'), ('obj004', '')), state=ShardRange.CREATED)
local_broker.merge_shard_ranges([own_shard_range] + shard_ranges)
self.assertTrue(local_broker.set_sharding_state())
# local db shards
merge_items_calls = []
with mock.patch('swift.container.backend.ContainerBroker.merge_items',
mock_merge_items):
with self._mock_sharder() as sharder:
sharder._replicate_object = mock.MagicMock(
return_value=(True, [True, True, True]))
sharder._audit_container = mock.MagicMock()
sharder._process_broker(local_broker, local_node, 0)
# all objects merged from local to shard ranges
self.assertEqual([(shard_ranges[0].name, objects[1:5]),
(shard_ranges[1].name, objects[5:8])],
merge_items_calls)
# shard brokers have sync points
expected_shard_dbs = []
for shard_range in shard_ranges:
db_hash = hash_path(shard_range.account, shard_range.container)
expected_shard_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_hash + '.db'))
shard_broker = ContainerBroker(expected_shard_dbs[0])
self.assertEqual(
[{'remote_id': local_retiring_db_id, 'sync_point': 7},
{'remote_id': remote_retiring_db_id, 'sync_point': 5}],
shard_broker.get_syncs())
self.assertEqual(objects[1:5], shard_broker.get_objects())
shard_broker = ContainerBroker(expected_shard_dbs[1])
self.assertEqual(
[{'remote_id': local_retiring_db_id, 'sync_point': 7},
{'remote_id': remote_retiring_db_id, 'sync_point': 5}],
shard_broker.get_syncs())
self.assertEqual(objects[5:8], shard_broker.get_objects())
# local db replicates to remote, so remote now has shard ranges
# note: no objects replicated because local is sharded
self.assertFalse(remote_broker.get_shard_ranges())
replicate(remote_node, local_broker, 0)
self._assert_shard_ranges_equal(local_broker.get_shard_ranges(),
remote_broker.get_shard_ranges())
# remote db gets 3 new objects, bringing its total to 8
remote_broker.merge_items([dict(obj) for obj in objects[:1]])
remote_broker.merge_items([dict(obj) for obj in objects[8:]])
merge_items_calls = []
with mock.patch('swift.container.backend.ContainerBroker.merge_items',
mock_merge_items):
with self._mock_sharder() as sharder:
sharder._replicate_object = mock.MagicMock(
return_value=(True, [True, True, True]))
sharder._audit_container = mock.MagicMock()
sharder._process_broker(remote_broker, remote_node, 0)
# shard brokers have sync points for the remote db so only new objects
# are merged from remote broker to shard brokers
self.assertEqual([(shard_ranges[0].name, objects[:1]),
(shard_ranges[1].name, objects[8:])],
merge_items_calls)
# sync points are updated
shard_broker = ContainerBroker(expected_shard_dbs[0])
self.assertEqual(
[{'remote_id': local_retiring_db_id, 'sync_point': 7},
{'remote_id': remote_retiring_db_id, 'sync_point': 8}],
shard_broker.get_syncs())
self.assertEqual(objects[:5], shard_broker.get_objects())
shard_broker = ContainerBroker(expected_shard_dbs[1])
self.assertEqual(
[{'remote_id': local_retiring_db_id, 'sync_point': 7},
{'remote_id': remote_retiring_db_id, 'sync_point': 8}],
shard_broker.get_syncs())
self.assertEqual(objects[5:], shard_broker.get_objects())
def test_cleave_skips_shrinking_and_stops_at_found(self):
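# verify that cleaving skips shard ranges in SHRINKING state and stops
# when it reaches a shard range still in FOUND state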
broker = self._make_broker()
broker.enable_sharding(Timestamp.now())
shard_bounds = (('', 'b'),
('b', 'c'),
('b', 'd'),
('d', 'f'),
('f', ''))
# make sure there is an object in every shard range so cleaving will
# occur in batches of 2
objects = [
('a', self.ts_encoded(), 10, 'text/plain', 'etag_a', 0, 0),
('b', self.ts_encoded(), 10, 'text/plain', 'etag_b', 0, 0),
('c', self.ts_encoded(), 1, 'text/plain', 'etag_c', 0, 0),
('d', self.ts_encoded(), 2, 'text/plain', 'etag_d', 0, 0),
('e', self.ts_encoded(), 3, 'text/plain', 'etag_e', 0, 0),
('f', self.ts_encoded(), 100, 'text/plain', 'etag_f', 0, 0),
('x', self.ts_encoded(), 0, '', '', 1, 0), # deleted
('z', self.ts_encoded(), 1000, 'text/plain', 'etag_z', 0, 0)
]
for obj in objects:
broker.put_object(*obj)
shard_ranges = self._make_shard_ranges(
shard_bounds, state=[ShardRange.CREATED,
ShardRange.SHRINKING,
ShardRange.CREATED,
ShardRange.CREATED,
ShardRange.FOUND])
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.set_sharding_state())
# run cleave - first batch is cleaved, shrinking range doesn't count
# towards batch size of 2 nor towards ranges_done
with self._mock_sharder() as sharder:
self.assertFalse(sharder._cleave(broker))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertFalse(context.cleaving_done)
self.assertEqual(shard_ranges[2].upper_str, context.cursor)
self.assertEqual(2, context.ranges_done)
self.assertEqual(2, context.ranges_todo)
# run cleave - stops at shard range in FOUND state
with self._mock_sharder() as sharder:
self.assertFalse(sharder._cleave(broker))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertFalse(context.cleaving_done)
self.assertEqual(shard_ranges[3].upper_str, context.cursor)
self.assertEqual(3, context.ranges_done)
self.assertEqual(1, context.ranges_todo)
# run cleave - final shard range in CREATED state, cleaving proceeds
shard_ranges[4].update_state(ShardRange.CREATED,
state_timestamp=Timestamp.now())
broker.merge_shard_ranges(shard_ranges[4:])
with self._mock_sharder() as sharder:
self.assertTrue(sharder._cleave(broker))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertTrue(context.cleaving_done)
self.assertEqual(shard_ranges[4].upper_str, context.cursor)
self.assertEqual(4, context.ranges_done)
self.assertEqual(0, context.ranges_todo)
def test_cleave_shrinking_to_active_root_range(self):
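# verify that a shrinking shard cleaves to an ACTIVE root range,
# ignoring a deleted shard range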
broker = self._make_shrinking_broker(account='.shards_a',
container='shard_c')
deleted_range = ShardRange(
'.shards/other', next(self.ts_iter), 'here', 'there', deleted=True,
state=ShardRange.SHRUNK, epoch=next(self.ts_iter))
# root is the acceptor...
root = ShardRange(
'a/c', next(self.ts_iter), '', '',
state=ShardRange.ACTIVE, epoch=next(self.ts_iter))
broker.merge_shard_ranges([deleted_range, root])
broker.set_sharding_sysmeta('Root', 'a/c')
self.assertFalse(broker.is_root_container()) # sanity check
# expect cleave to the root
with self._mock_sharder() as sharder:
self.assertTrue(sharder._cleave(broker))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertTrue(context.cleaving_done)
self.assertEqual(root.upper_str, context.cursor)
self.assertEqual(1, context.ranges_done)
self.assertEqual(0, context.ranges_todo)
def test_cleave_shrinking_to_active_acceptor_with_sharded_root_range(self):
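# verify that a shrinking shard cleaves to the ACTIVE acceptor rather than
# to the SHARDED root range that was also merged during audit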
broker = self._make_broker(account='.shards_a', container='shard_c')
broker.put_object(
'here_a', next(self.ts_iter), 10, 'text/plain', 'etag_a', 0, 0)
own_shard_range = ShardRange(
broker.path, next(self.ts_iter), 'here', 'there',
state=ShardRange.SHARDING, epoch=next(self.ts_iter))
# the intended acceptor...
acceptor = ShardRange(
'.shards_a/shard_d', next(self.ts_iter), 'here', '',
state=ShardRange.ACTIVE, epoch=next(self.ts_iter))
# root range also gets pulled from root during audit...
root = ShardRange(
'a/c', next(self.ts_iter), '', '',
state=ShardRange.SHARDED, epoch=next(self.ts_iter))
broker.merge_shard_ranges([own_shard_range, acceptor, root])
broker.set_sharding_sysmeta('Root', 'a/c')
self.assertFalse(broker.is_root_container()) # sanity check
self.assertTrue(broker.set_sharding_state())
# sharded root range should always sort after an active acceptor so
# expect cleave to acceptor first then cleaving completes
with self._mock_sharder() as sharder:
self.assertTrue(sharder._cleave(broker))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertTrue(context.cleaving_done)
self.assertEqual(acceptor.upper_str, context.cursor)
self.assertEqual(1, context.ranges_done) # cleaved the acceptor
self.assertEqual(1, context.ranges_todo) # never reached sharded root
def test_cleave_shrinking_to_active_root_range_with_active_acceptor(self):
# if shrinking shard has both active root and active other acceptor,
# verify that shard only cleaves to one of them;
# root will sort before acceptor if acceptor.upper==MAX
objects = (
('here_a', next(self.ts_iter), 10, 'text/plain', 'etag_a', 0, 0),)
broker = self._make_shrinking_broker(objects=objects)
# active acceptor with upper bound == MAX
acceptor = ShardRange(
'.shards/other', next(self.ts_iter), 'here', '', deleted=False,
state=ShardRange.ACTIVE, epoch=next(self.ts_iter))
# root is also active
root = ShardRange(
'a/c', next(self.ts_iter), '', '',
state=ShardRange.ACTIVE, epoch=next(self.ts_iter))
broker.merge_shard_ranges([acceptor, root])
broker.set_sharding_sysmeta('Root', 'a/c')
self.assertFalse(broker.is_root_container()) # sanity check
# expect cleave to the root
acceptor.upper = ''
acceptor.timestamp = next(self.ts_iter)
broker.merge_shard_ranges([acceptor])
with self._mock_sharder() as sharder:
self.assertTrue(sharder._cleave(broker))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertTrue(context.cleaving_done)
self.assertEqual(root.upper_str, context.cursor)
self.assertEqual(1, context.ranges_done)
self.assertEqual(1, context.ranges_todo)
info = [
line for line in self.logger.get_lines_for_level('info')
if line.startswith('Replicating new shard container a/c')
]
self.assertEqual(1, len(info))
def test_cleave_shrinking_to_active_acceptor_with_active_root_range(self):
# if shrinking shard has both active root and active other acceptor,
# verify that shard only cleaves to one of them;
# root will sort after acceptor if acceptor.upper<MAX
objects = (
('here_a', next(self.ts_iter), 10, 'text/plain', 'etag_a', 0, 0),)
broker = self._make_shrinking_broker(objects=objects)
# active acceptor with upper bound < MAX
acceptor = ShardRange(
'.shards/other', next(self.ts_iter), 'here', 'where',
deleted=False, state=ShardRange.ACTIVE, epoch=next(self.ts_iter))
# root is also active
root = ShardRange(
'a/c', next(self.ts_iter), '', '',
state=ShardRange.ACTIVE, epoch=next(self.ts_iter))
broker.merge_shard_ranges([acceptor, root])
# expect cleave to the acceptor
with self._mock_sharder() as sharder:
self.assertTrue(sharder._cleave(broker))
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
self.assertTrue(context.cleaving_done)
self.assertEqual(acceptor.upper_str, context.cursor)
self.assertEqual(1, context.ranges_done)
self.assertEqual(1, context.ranges_todo)
info = [
line for line in self.logger.get_lines_for_level('info')
if line.startswith('Replicating new shard container .shards/other')
]
self.assertEqual(1, len(info))
def _check_not_complete_sharding(self, broker):
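# assert that _complete_sharding fails, warns 'Repeat cleaving required'
# and leaves the broker in the sharding state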
with self._mock_sharder() as sharder:
self.assertFalse(sharder._complete_sharding(broker))
warning_lines = sharder.logger.get_lines_for_level('warning')
self.assertIn('Repeat cleaving required', warning_lines[0])
self.assertFalse(warning_lines[1:])
sharder.logger.clear()
context = CleavingContext.load(broker)
self.assertFalse(context.cleaving_done)
self.assertFalse(context.misplaced_done)
self.assertEqual('', context.cursor)
self.assertEqual(ShardRange.SHARDING,
broker.get_own_shard_range().state)
for shard_range in broker.get_shard_ranges():
self.assertEqual(ShardRange.CLEAVED, shard_range.state)
self.assertEqual(SHARDING, broker.get_db_state())
def _check_complete_sharding(self, account, container, shard_bounds):
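# drive a sharding broker through conditions that prevent completion, then
# complete sharding once the cleaving context is consistent with the db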
broker = self._make_sharding_broker(
account=account, container=container, shard_bounds=shard_bounds)
obj = {'name': 'obj', 'created_at': next(self.ts_iter).internal,
'size': 14, 'content_type': 'text/plain', 'etag': 'an etag',
'deleted': 0}
broker.get_brokers()[0].merge_items([obj])
self.assertEqual(2, len(broker.db_files)) # sanity check
# no cleave context progress
self._check_not_complete_sharding(broker)
# cleaving_done is False
context = CleavingContext.load(broker)
self.assertEqual(1, context.max_row)
context.cleave_to_row = 1 # pretend all rows have been cleaved
context.cleaving_done = False
context.misplaced_done = True
context.store(broker)
self._check_not_complete_sharding(broker)
# misplaced_done is False
context.misplaced_done = False
context.cleaving_done = True
context.store(broker)
self._check_not_complete_sharding(broker)
# modified db max row
old_broker = broker.get_brokers()[0]
obj = {'name': 'obj', 'created_at': next(self.ts_iter).internal,
'size': 14, 'content_type': 'text/plain', 'etag': 'an etag',
'deleted': 1}
old_broker.merge_items([obj])
self.assertGreater(old_broker.get_max_row(), context.max_row)
context.misplaced_done = True
context.cleaving_done = True
context.store(broker)
self._check_not_complete_sharding(broker)
# db id changes
broker.get_brokers()[0].newid('fake_remote_id')
context.cleave_to_row = 2 # pretend all rows have been cleaved, again
context.store(broker)
self._check_not_complete_sharding(broker)
# context ok
context = CleavingContext.load(broker)
context.cleave_to_row = context.max_row
context.misplaced_done = True
context.cleaving_done = True
context.store(broker)
with self._mock_sharder() as sharder:
self.assertTrue(sharder._complete_sharding(broker))
self.assertEqual(SHARDED, broker.get_db_state())
self.assertEqual(ShardRange.SHARDED,
broker.get_own_shard_range().state)
for shard_range in broker.get_shard_ranges():
self.assertEqual(ShardRange.ACTIVE, shard_range.state)
warning_lines = sharder.logger.get_lines_for_level('warning')
self.assertFalse(warning_lines)
sharder.logger.clear()
return broker
def test_complete_sharding_root(self):
broker = self._check_complete_sharding(
'a', 'c', (('', 'mid'), ('mid', '')))
self.assertEqual(0, broker.get_own_shard_range().deleted)
def test_complete_sharding_shard(self):
broker = self._check_complete_sharding(
'.shards_', 'shard_c', (('l', 'mid'), ('mid', 'u')))
self.assertEqual(1, broker.get_own_shard_range().deleted)
def test_complete_sharding_missing_own_shard_range(self):
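# verify that _complete_sharding fails and warns if the broker has no
# own shard range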
broker = self._make_sharding_broker()
obj = {'name': 'obj', 'created_at': next(self.ts_iter).internal,
'size': 14, 'content_type': 'text/plain', 'etag': 'an etag',
'deleted': 0}
broker.get_brokers()[0].merge_items([obj])
self.assertEqual(2, len(broker.db_files)) # sanity check
# mark the cleaving context as done
context = CleavingContext.load(broker)
self.assertEqual(1, context.max_row)
context.cleave_to_row = 1 # pretend all rows have been cleaved
context.cleaving_done = True
context.misplaced_done = True
context.store(broker)
with self._mock_sharder() as sharder, mock.patch(
'swift.container.backend.ContainerBroker.get_own_shard_range',
return_value=None):
self.assertFalse(sharder._complete_sharding(broker))
self.assertEqual(SHARDING, broker.get_db_state())
warning_lines = sharder.logger.get_lines_for_level('warning')
self.assertEqual(warning_lines[0],
'Failed to get own_shard_range, path: a/c, db: %s'
% broker.db_file)
def test_sharded_record_sharding_progress_missing_contexts(self):
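# verify that a sharded db is only reported as 'sharding in progress'
# while its cleaving contexts still exist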
broker = self._check_complete_sharding(
'a', 'c', (('', 'mid'), ('mid', '')))
with self._mock_sharder() as sharder:
with mock.patch.object(sharder, '_append_stat') as mocked:
sharder._record_sharding_progress(broker, {}, None)
mocked.assert_called_once_with('sharding_in_progress', 'all', mock.ANY)
# clear the contexts then run _record_sharding_progress
for context, _ in CleavingContext.load_all(broker):
context.delete(broker)
with self._mock_sharder() as sharder:
with mock.patch.object(sharder, '_append_stat') as mocked:
sharder._record_sharding_progress(broker, {}, None)
mocked.assert_not_called()
def test_incomplete_sharding_progress_warning_log(self):
# test to verify sharder will print warning logs if sharding has been
# taking too long.
broker = self._make_sharding_broker(
'a', 'c', (('', 'mid'), ('mid', '')))
obj = {'name': 'obj', 'created_at': next(self.ts_iter).internal,
'size': 14, 'content_type': 'text/plain', 'etag': 'an etag',
'deleted': 0}
broker.get_brokers()[0].merge_items([obj])
self.assertEqual(2, len(broker.db_files))
# sharding is not complete due to no cleave context progress.
self._check_not_complete_sharding(broker)
own_shard_range = broker.get_own_shard_range()
# advance time but still within 'container_sharding_timeout'.
future_time = 10000 + float(own_shard_range.epoch)
with mock.patch(
'swift.container.sharder.time.time',
return_value=future_time), self._mock_sharder() as sharder:
sharder._record_sharding_progress(broker, {}, None)
self.assertEqual([], self.logger.get_lines_for_level('warning'))
future_time = 172800 + float(own_shard_range.epoch)
with mock.patch(
'swift.container.sharder.time.time',
return_value=future_time), self._mock_sharder() as sharder:
sharder._record_sharding_progress(broker, {}, None)
self.assertEqual([], self.logger.get_lines_for_level('warning'))
# advance time beyond 'container_sharding_timeout'.
future_time = 172800 + float(own_shard_range.epoch) + 1
with mock.patch(
'swift.container.sharder.time.time',
return_value=future_time), self._mock_sharder() as sharder:
sharder._record_sharding_progress(broker, {}, None)
warning_lines = sharder.logger.get_lines_for_level('warning')
self.assertIn(
'Cleaving has not completed in %.2f seconds since %s. DB state: '
'sharding' % (future_time - float(own_shard_range.epoch),
own_shard_range.epoch.isoformat),
warning_lines[0])
def test_incomplete_shrinking_progress_warning_log(self):
# test to verify sharder will print warning logs if shrinking has been
# taking too long.
broker = self._make_shrinking_broker()
obj = {'name': 'obj', 'created_at': next(self.ts_iter).internal,
'size': 14, 'content_type': 'text/plain', 'etag': 'an etag',
'deleted': 0}
broker.get_brokers()[0].merge_items([obj])
# active acceptor with upper bound < MAX
acceptor = ShardRange(
'.shards/other', next(self.ts_iter), 'here', 'where',
deleted=False, state=ShardRange.ACTIVE, epoch=next(self.ts_iter))
broker.merge_shard_ranges([acceptor])
context = CleavingContext.load(broker)
self.assertFalse(context.cleaving_done)
own_shard_range = broker.get_own_shard_range()
# advance time but still within 'container_sharding_timeout'.
future_time = 10000 + float(own_shard_range.epoch)
with mock.patch(
'swift.container.sharder.time.time',
return_value=future_time), self._mock_sharder() as sharder:
sharder._record_sharding_progress(broker, {}, None)
self.assertEqual([], self.logger.get_lines_for_level('warning'))
future_time = 172800 + float(own_shard_range.epoch)
with mock.patch(
'swift.container.sharder.time.time',
return_value=future_time), self._mock_sharder() as sharder:
sharder._record_sharding_progress(broker, {}, None)
self.assertEqual([], self.logger.get_lines_for_level('warning'))
# advance time beyond 'container_sharding_timeout'.
future_time = 172800 + float(own_shard_range.epoch) + 1
with mock.patch(
'swift.container.sharder.time.time',
return_value=future_time), self._mock_sharder() as sharder:
sharder._record_sharding_progress(broker, {}, None)
warning_lines = sharder.logger.get_lines_for_level('warning')
self.assertIn(
'Cleaving has not completed in %.2f seconds since %s.' %
(future_time - float(own_shard_range.epoch),
own_shard_range.epoch.isoformat),
warning_lines[0])
def test_identify_sharding_old_style_candidate(self):
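# like test_identify_sharding_candidate below, but the brokers use the
# old-style 'Root' sysmeta rather than 'Quoted-Root'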
brokers = [self._make_broker(container='c%03d' % i) for i in range(6)]
for broker in brokers:
broker.set_sharding_sysmeta('Root', 'a/c')
node = {'index': 2}
# containers are all empty
with self._mock_sharder() as sharder:
for broker in brokers:
sharder._identify_sharding_candidate(broker, node)
expected_stats = {}
self._assert_stats(expected_stats, sharder, 'sharding_candidates')
objects = [
['obj%3d' % i, next(self.ts_iter).internal, i, 'text/plain',
'etag%s' % i, 0] for i in range(160)]
# one container has 100 objects, which is below the sharding threshold
for obj in objects[:100]:
brokers[0].put_object(*obj)
conf = {'recon_cache_path': self.tempdir}
with self._mock_sharder(conf=conf) as sharder:
for broker in brokers:
sharder._identify_sharding_candidate(broker, node)
self.assertFalse(sharder.sharding_candidates)
expected_recon = {
'found': 0,
'top': []}
sharder._report_stats()
self._assert_recon_stats(
expected_recon, sharder, 'sharding_candidates')
# reduce the sharding threshold and the container is reported
conf = {'shard_container_threshold': 100,
'recon_cache_path': self.tempdir}
with self._mock_sharder(conf=conf) as sharder:
with mock_timestamp_now() as now:
for broker in brokers:
sharder._identify_sharding_candidate(broker, node)
stats_0 = {'path': brokers[0].db_file,
'node_index': 2,
'account': 'a',
'container': 'c000',
'root': 'a/c',
'object_count': 100,
'meta_timestamp': now.internal,
'file_size': os.stat(brokers[0].db_file).st_size}
self.assertEqual([stats_0], sharder.sharding_candidates)
expected_recon = {
'found': 1,
'top': [stats_0]}
sharder._report_stats()
self._assert_recon_stats(
expected_recon, sharder, 'sharding_candidates')
def test_identify_sharding_candidate(self):
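# verify that containers reaching shard_container_threshold with an ACTIVE
# own shard range are reported as sharding candidates, subject to
# recon_candidates_limit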
brokers = [self._make_broker(container='c%03d' % i) for i in range(6)]
for broker in brokers:
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
node = {'index': 2}
# containers are all empty
with self._mock_sharder() as sharder:
for broker in brokers:
sharder._identify_sharding_candidate(broker, node)
expected_stats = {}
self._assert_stats(expected_stats, sharder, 'sharding_candidates')
objects = [
['obj%3d' % i, next(self.ts_iter).internal, i, 'text/plain',
'etag%s' % i, 0] for i in range(160)]
# one container has 100 objects, which is below the sharding threshold
for obj in objects[:100]:
brokers[0].put_object(*obj)
conf = {'recon_cache_path': self.tempdir}
with self._mock_sharder(conf=conf) as sharder:
for broker in brokers:
sharder._identify_sharding_candidate(broker, node)
self.assertFalse(sharder.sharding_candidates)
expected_recon = {
'found': 0,
'top': []}
sharder._report_stats()
self._assert_recon_stats(
expected_recon, sharder, 'sharding_candidates')
# reduce the sharding threshold and the container is reported
conf = {'shard_container_threshold': 100,
'recon_cache_path': self.tempdir}
with self._mock_sharder(conf=conf) as sharder:
with mock_timestamp_now() as now:
for broker in brokers:
sharder._identify_sharding_candidate(broker, node)
stats_0 = {'path': brokers[0].db_file,
'node_index': 2,
'account': 'a',
'container': 'c000',
'root': 'a/c',
'object_count': 100,
'meta_timestamp': now.internal,
'file_size': os.stat(brokers[0].db_file).st_size}
self.assertEqual([stats_0], sharder.sharding_candidates)
expected_recon = {
'found': 1,
'top': [stats_0]}
sharder._report_stats()
self._assert_recon_stats(
expected_recon, sharder, 'sharding_candidates')
# repeat with handoff node and db_file error
with self._mock_sharder(conf=conf) as sharder:
with mock.patch('os.stat', side_effect=OSError('test error')):
with mock_timestamp_now(now):
for broker in brokers:
sharder._identify_sharding_candidate(broker, {})
stats_0_b = {'path': brokers[0].db_file,
'node_index': None,
'account': 'a',
'container': 'c000',
'root': 'a/c',
'object_count': 100,
'meta_timestamp': now.internal,
'file_size': None}
self.assertEqual([stats_0_b], sharder.sharding_candidates)
self._assert_stats(expected_stats, sharder, 'sharding_candidates')
expected_recon = {
'found': 1,
'top': [stats_0_b]}
sharder._report_stats()
self._assert_recon_stats(
expected_recon, sharder, 'sharding_candidates')
# load up another container, but not to threshold for sharding, and
# verify it is never a candidate for sharding
for obj in objects[:50]:
brokers[2].put_object(*obj)
own_sr = brokers[2].get_own_shard_range()
for state in ShardRange.STATES:
own_sr.update_state(state, state_timestamp=Timestamp.now())
brokers[2].merge_shard_ranges([own_sr])
with self._mock_sharder(conf=conf) as sharder:
with mock_timestamp_now(now):
for broker in brokers:
sharder._identify_sharding_candidate(broker, node)
with annotate_failure(state):
self.assertEqual([stats_0], sharder.sharding_candidates)
# reduce the threshold and the second container is included
conf = {'shard_container_threshold': 50,
'recon_cache_path': self.tempdir}
own_sr.update_state(ShardRange.ACTIVE, state_timestamp=Timestamp.now())
brokers[2].merge_shard_ranges([own_sr])
with self._mock_sharder(conf=conf) as sharder:
with mock_timestamp_now(now):
for broker in brokers:
sharder._identify_sharding_candidate(broker, node)
stats_2 = {'path': brokers[2].db_file,
'node_index': 2,
'account': 'a',
'container': 'c002',
'root': 'a/c',
'object_count': 50,
'meta_timestamp': now.internal,
'file_size': os.stat(brokers[2].db_file).st_size}
self.assertEqual([stats_0, stats_2], sharder.sharding_candidates)
expected_recon = {
'found': 2,
'top': [stats_0, stats_2]}
sharder._report_stats()
self._assert_recon_stats(
expected_recon, sharder, 'sharding_candidates')
# a broker not in active state is not included
own_sr = brokers[0].get_own_shard_range()
for state in ShardRange.STATES:
if state == ShardRange.ACTIVE:
continue
own_sr.update_state(state, state_timestamp=Timestamp.now())
brokers[0].merge_shard_ranges([own_sr])
with self._mock_sharder(conf=conf) as sharder:
with mock_timestamp_now(now):
for broker in brokers:
sharder._identify_sharding_candidate(broker, node)
with annotate_failure(state):
self.assertEqual([stats_2], sharder.sharding_candidates)
own_sr.update_state(ShardRange.ACTIVE, state_timestamp=Timestamp.now())
brokers[0].merge_shard_ranges([own_sr])
# load up a third container with 150 objects
for obj in objects[:150]:
brokers[5].put_object(*obj)
with self._mock_sharder(conf=conf) as sharder:
with mock_timestamp_now(now):
for broker in brokers:
sharder._identify_sharding_candidate(broker, node)
stats_5 = {'path': brokers[5].db_file,
'node_index': 2,
'account': 'a',
'container': 'c005',
'root': 'a/c',
'object_count': 150,
'meta_timestamp': now.internal,
'file_size': os.stat(brokers[5].db_file).st_size}
self.assertEqual([stats_0, stats_2, stats_5],
sharder.sharding_candidates)
# note recon top list is sorted by size
expected_recon = {
'found': 3,
'top': [stats_5, stats_0, stats_2]}
sharder._report_stats()
self._assert_recon_stats(
expected_recon, sharder, 'sharding_candidates')
# restrict the number of reported candidates
conf = {'shard_container_threshold': 50,
'recon_cache_path': self.tempdir,
'recon_candidates_limit': 2}
with self._mock_sharder(conf=conf) as sharder:
with mock_timestamp_now(now):
for broker in brokers:
sharder._identify_sharding_candidate(broker, node)
self.assertEqual([stats_0, stats_2, stats_5],
sharder.sharding_candidates)
expected_recon = {
'found': 3,
'top': [stats_5, stats_0]}
sharder._report_stats()
self._assert_recon_stats(
expected_recon, sharder, 'sharding_candidates')
# unrestrict the number of reported candidates
conf = {'shard_container_threshold': 50,
'recon_cache_path': self.tempdir,
'recon_candidates_limit': -1}
for i, broker in enumerate([brokers[1]] + brokers[3:5]):
for obj in objects[:(151 + i)]:
broker.put_object(*obj)
with self._mock_sharder(conf=conf) as sharder:
with mock_timestamp_now(now):
for broker in brokers:
sharder._identify_sharding_candidate(broker, node)
stats_4 = {'path': brokers[4].db_file,
'node_index': 2,
'account': 'a',
'container': 'c004',
'root': 'a/c',
'object_count': 153,
'meta_timestamp': now.internal,
'file_size': os.stat(brokers[4].db_file).st_size}
stats_3 = {'path': brokers[3].db_file,
'node_index': 2,
'account': 'a',
'container': 'c003',
'root': 'a/c',
'object_count': 152,
'meta_timestamp': now.internal,
'file_size': os.stat(brokers[3].db_file).st_size}
stats_1 = {'path': brokers[1].db_file,
'node_index': 2,
'account': 'a',
'container': 'c001',
'root': 'a/c',
'object_count': 151,
'meta_timestamp': now.internal,
'file_size': os.stat(brokers[1].db_file).st_size}
self.assertEqual(
[stats_0, stats_1, stats_2, stats_3, stats_4, stats_5],
sharder.sharding_candidates)
self._assert_stats(expected_stats, sharder, 'sharding_candidates')
expected_recon = {
'found': 6,
'top': [stats_4, stats_3, stats_1, stats_5, stats_0, stats_2]}
sharder._report_stats()
self._assert_recon_stats(
expected_recon, sharder, 'sharding_candidates')
def test_misplaced_objects_root_container(self):
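# verify that misplaced objects in a root container are moved to the shard
# dbs of cleaved ranges as cleaving progresses, and removed from the source db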
broker = self._make_broker()
broker.enable_sharding(next(self.ts_iter))
objects = [
# misplaced objects in second and third shard ranges
['n', self.ts_encoded(), 2, 'text/plain', 'etag_n', 0, 0],
['there', self.ts_encoded(), 3, 'text/plain', 'etag_there', 0, 1],
['where', self.ts_encoded(), 100, 'text/plain', 'etag_where', 0,
0],
# deleted
['x', self.ts_encoded(), 0, '', '', 1, 1],
]
shard_bounds = (('', 'here'), ('here', 'there'),
('there', 'where'), ('where', 'yonder'),
('yonder', ''))
initial_shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.ACTIVE)
expected_shard_dbs = []
for shard_range in initial_shard_ranges:
db_hash = hash_path(shard_range.account, shard_range.container)
expected_shard_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_hash + '.db'))
broker.merge_shard_ranges(initial_shard_ranges)
# unsharded
with self._mock_sharder() as sharder:
sharder._move_misplaced_objects(broker)
sharder._replicate_object.assert_not_called()
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 0, 'placed': 0, 'unplaced': 0,
'db_created': 0, 'db_exists': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_found'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_placed'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_unplaced'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_db_created'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_db_exists'))
# sharding - no misplaced objects
self.assertTrue(broker.set_sharding_state())
with self._mock_sharder() as sharder:
sharder._move_misplaced_objects(broker)
sharder._replicate_object.assert_not_called()
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_found'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_placed'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_unplaced'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_db_created'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_db_exists'))
# pretend we cleaved up to end of second shard range
context = CleavingContext.load(broker)
context.cursor = 'there'
context.store(broker)
with self._mock_sharder() as sharder:
sharder._move_misplaced_objects(broker)
sharder._replicate_object.assert_not_called()
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_found'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_placed'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_unplaced'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_db_created'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_db_exists'))
# sharding - misplaced objects
for obj in objects:
broker.put_object(*obj)
# pretend we have not cleaved any ranges
context.cursor = ''
context.store(broker)
with self._mock_sharder() as sharder:
sharder._move_misplaced_objects(broker)
sharder._replicate_object.assert_not_called()
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_found'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_placed'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_unplaced'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_db_created'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_db_exists'))
self.assertFalse(os.path.exists(expected_shard_dbs[0]))
self.assertFalse(os.path.exists(expected_shard_dbs[1]))
self.assertFalse(os.path.exists(expected_shard_dbs[2]))
self.assertFalse(os.path.exists(expected_shard_dbs[3]))
self.assertFalse(os.path.exists(expected_shard_dbs[4]))
# pretend we cleaved up to end of second shard range
context.cursor = 'there'
context.store(broker)
with self._mock_sharder() as sharder:
sharder._move_misplaced_objects(broker)
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[1], 0)
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 1, 'placed': 2, 'unplaced': 0,
'db_created': 1, 'db_exists': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
self.assertEqual(
2, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_placed'])
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_db_created'])
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_db_exists'))
# check misplaced objects were moved
self._check_objects(objects[:2], expected_shard_dbs[1])
# ... and removed from the source db
self._check_objects(objects[2:], broker.db_file)
# ... and nothing else moved
self.assertFalse(os.path.exists(expected_shard_dbs[0]))
self.assertFalse(os.path.exists(expected_shard_dbs[2]))
self.assertFalse(os.path.exists(expected_shard_dbs[3]))
self.assertFalse(os.path.exists(expected_shard_dbs[4]))
# pretend we cleaved up to end of fourth shard range
context.cursor = 'yonder'
context.store(broker)
# and some new misplaced updates arrived in the first shard range
new_objects = [
['b', self.ts_encoded(), 10, 'text/plain', 'etag_b', 0, 0],
['c', self.ts_encoded(), 20, 'text/plain', 'etag_c', 0, 0],
]
for obj in new_objects:
broker.put_object(*obj)
# check that *all* misplaced objects are moved despite exceeding
# the listing limit
with self._mock_sharder(conf={'cleave_row_batch_size': 2}) as sharder:
sharder._move_misplaced_objects(broker)
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 1, 'placed': 4, 'unplaced': 0,
'db_created': 3, 'db_exists': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
sharder._replicate_object.assert_has_calls(
[mock.call(0, db, 0) for db in expected_shard_dbs[2:4]],
any_order=True
)
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
self.assertEqual(
4, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_placed'])
self.assertEqual(
3, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_db_created'])
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_db_exists'))
# check misplaced objects were moved
self._check_objects(new_objects, expected_shard_dbs[0])
self._check_objects(objects[:2], expected_shard_dbs[1])
self._check_objects(objects[2:3], expected_shard_dbs[2])
self._check_objects(objects[3:], expected_shard_dbs[3])
# ... and removed from the source db
self._check_objects([], broker.db_file)
self.assertFalse(os.path.exists(expected_shard_dbs[4]))
# pretend we cleaved all ranges - sharded state
self.assertTrue(broker.set_sharded_state())
with self._mock_sharder() as sharder:
sharder._move_misplaced_objects(broker)
sharder._replicate_object.assert_not_called()
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 0, 'placed': 0, 'unplaced': 0,
'db_created': 0, 'db_exists': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_found'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_placed'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_db_created'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_db_exists'))
# and then more misplaced updates arrive
newer_objects = [
['a-deleted', self.ts_encoded(), 51, 'text/plain', 'etag_a', 1, 0],
['z', self.ts_encoded(), 52, 'text/plain', 'etag_z', 0, 0],
['z-deleted', self.ts_encoded(), 52, 'text/plain', 'etag_z', 1, 0],
]
for obj in newer_objects:
broker.put_object(*obj)
broker.get_info() # force updates to be committed
# sanity check the puts landed in sharded broker
self._check_objects(newer_objects, broker.db_file)
with self._mock_sharder() as sharder:
sharder._move_misplaced_objects(broker)
sharder._replicate_object.assert_has_calls(
[mock.call(0, db, 0)
for db in (expected_shard_dbs[0], expected_shard_dbs[-1])],
any_order=True
)
# the shard broker for the first shard range was already created but not
# removed (because _replicate_object is mocked), so expect one 'db_created'
# and one 'db_exists' stat...
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 1, 'placed': 3, 'unplaced': 0,
'db_created': 1, 'db_exists': 1}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
self.assertEqual(
3, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_placed'])
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_db_created'])
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_db_exists'])
# check new misplaced objects were moved
self._check_objects(newer_objects[:1] + new_objects,
expected_shard_dbs[0])
self._check_objects(newer_objects[1:], expected_shard_dbs[4])
# ... and removed from the source db
self._check_objects([], broker.db_file)
# ... and other shard dbs were unchanged
self._check_objects(objects[:2], expected_shard_dbs[1])
self._check_objects(objects[2:3], expected_shard_dbs[2])
self._check_objects(objects[3:], expected_shard_dbs[3])
def _setup_misplaced_objects(self):
# make a broker with shard ranges, move it to sharded state and then
# put some misplaced objects in it
broker = self._make_broker()
shard_bounds = (('', 'here'), ('here', 'there'),
('there', 'where'), ('where', 'yonder'),
('yonder', ''))
initial_shard_ranges = [
ShardRange('.shards_a/%s-%s' % (lower, upper),
Timestamp.now(), lower, upper, state=ShardRange.ACTIVE)
for lower, upper in shard_bounds
]
expected_dbs = []
for shard_range in initial_shard_ranges:
db_hash = hash_path(shard_range.account, shard_range.container)
expected_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_hash + '.db'))
broker.merge_shard_ranges(initial_shard_ranges)
objects = [
# misplaced objects in second, third and fourth shard ranges
['n', self.ts_encoded(), 2, 'text/plain', 'etag_n', 0, 0],
['there', self.ts_encoded(), 3, 'text/plain', 'etag_there', 0, 0],
['where', self.ts_encoded(), 100, 'text/plain', 'etag_where', 0,
0],
# deleted
['x', self.ts_encoded(), 0, '', '', 1, 0],
]
broker.enable_sharding(Timestamp.now())
self.assertTrue(broker.set_sharding_state())
self.assertTrue(broker.set_sharded_state())
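# the puts below land in the fresh sharded db; every row falls inside one
# of the shard ranges rather than the root, so all are misplaced and
# should be moved by the sharder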
for obj in objects:
broker.put_object(*obj)
self.assertEqual(SHARDED, broker.get_db_state())
return broker, objects, expected_dbs
def test_misplaced_objects_newer_objects(self):
# verify that objects merged to the db after misplaced objects have
# been identified are not removed from the db
broker, objects, expected_dbs = self._setup_misplaced_objects()
newer_objects = [
['j', self.ts_encoded(), 51, 'text/plain', 'etag_j', 0, 0],
['k', self.ts_encoded(), 52, 'text/plain', 'etag_k', 1, 0],
]
calls = []
pre_removal_objects = []
def mock_replicate_object(part, db, node_id):
calls.append((part, db, node_id))
if db == expected_dbs[1]:
# put some new objects in the shard range that is being
# replicated before misplaced objects are removed from that
# range in the source db
for obj in newer_objects:
broker.put_object(*obj)
# grab a snapshot of the db contents - a side effect is
# that the newer objects are now committed to the db
pre_removal_objects.extend(
broker.get_objects())
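# report success on all 3 replicas so that, after replicating, the
# sharder goes on to remove the rows it moved from the source db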
return True, [True, True, True]
with self._mock_sharder(replicas=3) as sharder:
sharder._replicate_object = mock_replicate_object
sharder._move_misplaced_objects(broker)
# sanity check - the newer objects were in the db before the misplaced
# objects were removed
for obj in newer_objects:
self.assertIn(obj[0], [o['name'] for o in pre_removal_objects])
for obj in objects[:2]:
self.assertIn(obj[0], [o['name'] for o in pre_removal_objects])
self.assertEqual(
set([(0, db, 0) for db in (expected_dbs[1:4])]), set(calls))
# check misplaced objects were moved
self._check_objects(objects[:2], expected_dbs[1])
self._check_objects(objects[2:3], expected_dbs[2])
self._check_objects(objects[3:], expected_dbs[3])
# ... but newer objects were not removed from the source db
self._check_objects(newer_objects, broker.db_file)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 1, 'placed': 4, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
# they will be moved on next cycle
unlink_files(expected_dbs)
with self._mock_sharder(replicas=3) as sharder:
sharder._move_misplaced_objects(broker)
self._check_objects(newer_objects, expected_dbs[1])
self._check_objects([], broker.db_file)
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 1, 'placed': 2, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
def test_misplaced_objects_db_id_changed(self):
broker, objects, expected_dbs = self._setup_misplaced_objects()
pre_info = broker.get_info()
calls = []
expected_retained_objects = []
expected_retained_objects_dbs = []
def mock_replicate_object(part, db, node_id):
calls.append((part, db, node_id))
if len(calls) == 2:
broker.newid('fake_remote_id')
# grab snapshot of the objects in the broker when it changed id
expected_retained_objects.extend(
self._get_raw_object_records(broker))
if len(calls) >= 2:
expected_retained_objects_dbs.append(db)
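# changing the db id mid-cycle mimics the db being recreated (e.g. by
# replication); the sharder must not delete rows it selected against the
# old id, so objects for the 2nd and 3rd destinations should be retained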
return True, [True, True, True]
with self._mock_sharder(replicas=3) as sharder:
sharder._replicate_object = mock_replicate_object
sharder._move_misplaced_objects(broker)
# sanity checks
self.assertNotEqual(pre_info['id'], broker.get_info()['id'])
self.assertTrue(expected_retained_objects)
self.assertEqual(
set([(0, db, 0) for db in (expected_dbs[1:4])]), set(calls))
# check misplaced objects were moved
self._check_objects(objects[:2], expected_dbs[1])
self._check_objects(objects[2:3], expected_dbs[2])
self._check_objects(objects[3:], expected_dbs[3])
# ... but objects were not removed after the source db id changed
self._check_objects(expected_retained_objects, broker.db_file)
expected_stats = {'attempted': 1, 'success': 0, 'failure': 1,
'found': 1, 'placed': 4, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
lines = sharder.logger.get_lines_for_level('warning')
shard_ranges = broker.get_shard_ranges()
self.assertIn('Refused to remove misplaced objects for dest %s'
% shard_ranges[2], lines[0])
self.assertIn('Refused to remove misplaced objects for dest %s'
% shard_ranges[3], lines[1])
self.assertFalse(lines[2:])
# they will be moved again on next cycle
unlink_files(expected_dbs)
sharder.logger.clear()
with self._mock_sharder(replicas=3) as sharder:
sharder._move_misplaced_objects(broker)
self.assertEqual(2, len(set(expected_retained_objects_dbs)))
for db in expected_retained_objects_dbs:
if db == expected_dbs[1]:
self._check_objects(objects[:2], expected_dbs[1])
if db == expected_dbs[2]:
self._check_objects(objects[2:3], expected_dbs[2])
if db == expected_dbs[3]:
self._check_objects(objects[3:], expected_dbs[3])
self._check_objects([], broker.db_file)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 1, 'placed': len(expected_retained_objects),
'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
def test_misplaced_objects_sufficient_replication(self):
broker, objects, expected_dbs = self._setup_misplaced_objects()
with self._mock_sharder(replicas=3) as sharder:
sharder._replicate_object.return_value = (True, [True, True, True])
sharder._move_misplaced_objects(broker)
sharder._replicate_object.assert_has_calls(
[mock.call(0, db, 0) for db in (expected_dbs[2:4])],
any_order=True)
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 1, 'placed': 4, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
# check misplaced objects were moved
self._check_objects(objects[:2], expected_dbs[1])
self._check_objects(objects[2:3], expected_dbs[2])
self._check_objects(objects[3:], expected_dbs[3])
# ... and removed from the source db
self._check_objects([], broker.db_file)
# ... and nothing else moved
self.assertFalse(os.path.exists(expected_dbs[0]))
self.assertFalse(os.path.exists(expected_dbs[4]))
def test_misplaced_objects_insufficient_replication_3_replicas(self):
broker, objects, expected_dbs = self._setup_misplaced_objects()
returns = {expected_dbs[1]: (True, [True, True, True]), # ok
expected_dbs[2]: (False, [True, False, False]), # < quorum
expected_dbs[3]: (False, [False, True, True])} # ok
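# per the annotations above, only destinations reaching a majority (2 of
# 3) of per-node successes should have their rows removed from the source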
calls = []
def mock_replicate_object(part, db, node_id):
calls.append((part, db, node_id))
return returns[db]
with self._mock_sharder(replicas=3) as sharder:
sharder._replicate_object = mock_replicate_object
sharder._move_misplaced_objects(broker)
self.assertEqual(
set([(0, db, 0) for db in (expected_dbs[1:4])]), set(calls))
expected_stats = {'attempted': 1, 'success': 0, 'failure': 1,
'placed': 4, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
# check misplaced objects were moved to shard dbs
self._check_objects(objects[:2], expected_dbs[1])
self._check_objects(objects[2:3], expected_dbs[2])
self._check_objects(objects[3:], expected_dbs[3])
# ... but only removed from the source db if sufficiently replicated
self._check_objects(objects[2:3], broker.db_file)
# ... and nothing else moved
self.assertFalse(os.path.exists(expected_dbs[0]))
self.assertFalse(os.path.exists(expected_dbs[4]))
def test_misplaced_objects_insufficient_replication_2_replicas(self):
broker, objects, expected_dbs = self._setup_misplaced_objects()
returns = {expected_dbs[1]: (True, [True, True]), # ok
expected_dbs[2]: (False, [True, False]), # ok
expected_dbs[3]: (False, [False, False])} # < quorum
calls = []
def mock_replicate_object(part, db, node_id):
calls.append((part, db, node_id))
return returns[db]
with self._mock_sharder(replicas=2) as sharder:
sharder._replicate_object = mock_replicate_object
sharder._move_misplaced_objects(broker)
self.assertEqual(
set([(0, db, 0) for db in (expected_dbs[1:4])]), set(calls))
expected_stats = {'attempted': 1, 'success': 0, 'failure': 1,
'placed': 4, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
# check misplaced objects were moved to shard dbs
self._check_objects(objects[:2], expected_dbs[1])
self._check_objects(objects[2:3], expected_dbs[2])
self._check_objects(objects[3:], expected_dbs[3])
# ... but only removed from the source db if sufficiently replicated
self._check_objects(objects[3:], broker.db_file)
# ... and nothing else moved
self.assertFalse(os.path.exists(expected_dbs[0]))
self.assertFalse(os.path.exists(expected_dbs[4]))
def test_misplaced_objects_insufficient_replication_4_replicas(self):
broker, objects, expected_dbs = self._setup_misplaced_objects()
returns = {expected_dbs[1]: (False, [True, False, False, False]),
expected_dbs[2]: (True, [True, False, False, True]),
expected_dbs[3]: (False, [False, False, False, False])}
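# only expected_dbs[2] achieves 2 successful nodes; the other two
# destinations fall short so their rows should be retained in the source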
calls = []
def mock_replicate_object(part, db, node_id):
calls.append((part, db, node_id))
return returns[db]
with self._mock_sharder(replicas=4) as sharder:
sharder._replicate_object = mock_replicate_object
sharder._move_misplaced_objects(broker)
self.assertEqual(
set([(0, db, 0) for db in (expected_dbs[1:4])]), set(calls))
expected_stats = {'attempted': 1, 'success': 0, 'failure': 1,
'placed': 4, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
# check misplaced objects were moved to shard dbs
self._check_objects(objects[:2], expected_dbs[1])
self._check_objects(objects[2:3], expected_dbs[2])
self._check_objects(objects[3:], expected_dbs[3])
# ... but only removed from the source db if sufficiently replicated
self._check_objects(objects[:2] + objects[3:], broker.db_file)
# ... and nothing else moved
self.assertFalse(os.path.exists(expected_dbs[0]))
self.assertFalse(os.path.exists(expected_dbs[4]))
def _check_misplaced_objects_shard_container_unsharded(self, conf=None):
broker = self._make_broker(account='.shards_a', container='.shard_c')
ts_shard = next(self.ts_iter)
own_sr = ShardRange(broker.path, ts_shard, 'here', 'where')
broker.merge_shard_ranges([own_sr])
broker.set_sharding_sysmeta('Root', 'a/c')
self.assertEqual(own_sr, broker.get_own_shard_range()) # sanity check
self.assertEqual(UNSHARDED, broker.get_db_state())
objects = [
# some of these are misplaced objects
['b', self.ts_encoded(), 2, 'text/plain', 'etag_b', 0, 0],
['here', self.ts_encoded(), 2, 'text/plain', 'etag_here', 0, 0],
['n', self.ts_encoded(), 2, 'text/plain', 'etag_n', 0, 0],
['there', self.ts_encoded(), 3, 'text/plain', 'etag_there', 0, 0],
['x', self.ts_encoded(), 0, '', '', 1, 0], # deleted
['y', self.ts_encoded(), 10, 'text/plain', 'etag_y', 0, 0],
]
shard_bounds = (('', 'here'), ('here', 'there'),
('there', 'where'), ('where', ''))
root_shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.ACTIVE)
expected_shard_dbs = []
for sr in root_shard_ranges:
db_hash = hash_path(sr.account, sr.container)
expected_shard_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_hash + '.db'))
# no objects
with self._mock_sharder(conf=conf) as sharder:
sharder._fetch_shard_ranges = mock.MagicMock(
return_value=root_shard_ranges)
sharder._move_misplaced_objects(broker)
sharder._fetch_shard_ranges.assert_not_called()
sharder._replicate_object.assert_not_called()
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 0, 'placed': 0, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_success'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_failure'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_found'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_placed'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_unplaced'))
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
# now put objects
for obj in objects:
broker.put_object(*obj)
self._check_objects(objects, broker.db_file) # sanity check
# NB final shard range not available
with self._mock_sharder(conf=conf) as sharder:
sharder._fetch_shard_ranges = mock.MagicMock(
return_value=root_shard_ranges[:-1])
sharder._move_misplaced_objects(broker)
sharder._fetch_shard_ranges.assert_has_calls(
[mock.call(broker, newest=True, params={'states': 'updating',
'marker': '',
'end_marker': 'here\x00'}),
mock.call(broker, newest=True, params={'states': 'updating',
'marker': 'where',
'end_marker': ''})])
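# for a shard db only objects outside its own namespace ('here' to
# 'where') are misplaced, so shard ranges are fetched from the root for
# the gaps below 'here' and above 'where'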
sharder._replicate_object.assert_called_with(
0, expected_shard_dbs[0], 0)
expected_stats = {'attempted': 1, 'success': 0, 'failure': 1,
'found': 1, 'placed': 2, 'unplaced': 2}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_success'))
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_failure'))
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
self.assertEqual(
2, sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_placed'))
self.assertEqual(
2, sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_unplaced'))
# some misplaced objects could not be moved...
warning_lines = sharder.logger.get_lines_for_level('warning')
self.assertIn(
'Failed to find destination for at least 2 misplaced objects',
warning_lines[0])
self.assertFalse(warning_lines[1:])
sharder.logger.clear()
# check misplaced objects were moved
self._check_objects(objects[:2], expected_shard_dbs[0])
# ... and removed from the source db
self._check_objects(objects[2:], broker.db_file)
# ... and nothing else moved
self.assertFalse(os.path.exists(expected_shard_dbs[1]))
self.assertFalse(os.path.exists(expected_shard_dbs[2]))
self.assertFalse(os.path.exists(expected_shard_dbs[3]))
# repeat with final shard range available
with self._mock_sharder(conf=conf) as sharder:
sharder._fetch_shard_ranges = mock.MagicMock(
return_value=root_shard_ranges)
sharder._move_misplaced_objects(broker)
sharder._fetch_shard_ranges.assert_has_calls(
[mock.call(broker, newest=True, params={'states': 'updating',
'marker': 'where',
'end_marker': ''})])
sharder._replicate_object.assert_called_with(
0, expected_shard_dbs[-1], 0)
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 1, 'placed': 2, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_success'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_failure'))
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
self.assertEqual(
2, sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_placed'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_unplaced'))
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
# check misplaced objects were moved
self._check_objects(objects[:2], expected_shard_dbs[0])
self._check_objects(objects[4:], expected_shard_dbs[3])
# ... and removed from the source db
self._check_objects(objects[2:4], broker.db_file)
# ... and nothing else moved
self.assertFalse(os.path.exists(expected_shard_dbs[1]))
self.assertFalse(os.path.exists(expected_shard_dbs[2]))
# repeat - no work remaining
with self._mock_sharder(conf=conf) as sharder:
sharder._fetch_shard_ranges = mock.MagicMock(
return_value=root_shard_ranges)
sharder._move_misplaced_objects(broker)
sharder._fetch_shard_ranges.assert_not_called()
sharder._replicate_object.assert_not_called()
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 0, 'placed': 0, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_success'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_failure'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_found'))
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_placed'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_unplaced'))
# and then more misplaced updates arrive
new_objects = [
['a', self.ts_encoded(), 51, 'text/plain', 'etag_a', 0, 0],
['z', self.ts_encoded(), 52, 'text/plain', 'etag_z', 0, 0],
]
for obj in new_objects:
broker.put_object(*obj)
# sanity check the puts landed in the broker
self._check_objects(new_objects[:1] + objects[2:4] + new_objects[1:],
broker.db_file)
with self._mock_sharder(conf=conf) as sharder:
sharder._fetch_shard_ranges = mock.MagicMock(
return_value=root_shard_ranges)
sharder._move_misplaced_objects(broker)
sharder._fetch_shard_ranges.assert_has_calls(
[mock.call(broker, newest=True,
params={'states': 'updating',
'marker': '', 'end_marker': 'here\x00'}),
mock.call(broker, newest=True, params={'states': 'updating',
'marker': 'where',
'end_marker': ''})])
sharder._replicate_object.assert_has_calls(
[mock.call(0, db, 0)
for db in (expected_shard_dbs[0], expected_shard_dbs[3])],
any_order=True
)
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 1, 'placed': 2, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_success'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_failure'))
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
self.assertEqual(
2, sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_placed'))
self.assertFalse(
sharder.logger.statsd_client.get_stats_counts().get(
'misplaced_unplaced'))
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
# check new misplaced objects were moved
self._check_objects(new_objects[:1] + objects[:2],
expected_shard_dbs[0])
self._check_objects(objects[4:] + new_objects[1:],
expected_shard_dbs[3])
# ... and removed from the source db
self._check_objects(objects[2:4], broker.db_file)
# ... and nothing else moved
self.assertFalse(os.path.exists(expected_shard_dbs[1]))
self.assertFalse(os.path.exists(expected_shard_dbs[2]))
def test_misplaced_objects_shard_container_unsharded(self):
self._check_misplaced_objects_shard_container_unsharded()
def test_misplaced_objects_shard_container_unsharded_limit_two(self):
self._check_misplaced_objects_shard_container_unsharded(
conf={'cleave_row_batch_size': 2})
def test_misplaced_objects_shard_container_unsharded_limit_one(self):
self._check_misplaced_objects_shard_container_unsharded(
conf={'cleave_row_batch_size': 1})
def test_misplaced_objects_shard_container_sharding(self):
broker = self._make_broker(account='.shards_a', container='shard_c')
ts_shard = next(self.ts_iter)
# note that own_sr spans two root shard ranges
own_sr = ShardRange(broker.path, ts_shard, 'here', 'where')
own_sr.update_state(ShardRange.SHARDING)
own_sr.epoch = next(self.ts_iter)
broker.merge_shard_ranges([own_sr])
broker.set_sharding_sysmeta('Root', 'a/c')
self.assertEqual(own_sr, broker.get_own_shard_range()) # sanity check
self.assertEqual(UNSHARDED, broker.get_db_state())
objects = [
# some of these are misplaced objects
['b', self.ts_encoded(), 2, 'text/plain', 'etag_b', 0, 0],
['here', self.ts_encoded(), 2, 'text/plain', 'etag_here', 0, 0],
['n', self.ts_encoded(), 2, 'text/plain', 'etag_n', 0, 0],
['there', self.ts_encoded(), 3, 'text/plain', 'etag_there', 0, 0],
['v', self.ts_encoded(), 10, 'text/plain', 'etag_v', 0, 0],
['y', self.ts_encoded(), 10, 'text/plain', 'etag_y', 0, 0],
]
shard_bounds = (('', 'here'), ('here', 'there'),
('there', 'where'), ('where', ''))
root_shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.ACTIVE)
expected_shard_dbs = []
for sr in root_shard_ranges:
db_hash = hash_path(sr.account, sr.container)
expected_shard_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_hash + '.db'))
# pretend broker is sharding but not yet cleaved a shard
self.assertTrue(broker.set_sharding_state())
broker.merge_shard_ranges([dict(sr) for sr in root_shard_ranges[1:3]])
# then some updates arrive
for obj in objects:
broker.put_object(*obj)
broker.get_info()  # force updates to be committed
self._check_objects(objects, broker.db_file) # sanity check
# first destination is not available
with self._mock_sharder() as sharder:
sharder._fetch_shard_ranges = mock.MagicMock(
return_value=root_shard_ranges[1:])
sharder._move_misplaced_objects(broker)
sharder._fetch_shard_ranges.assert_has_calls(
[mock.call(broker, newest=True,
params={'states': 'updating',
'marker': '', 'end_marker': 'here\x00'}),
mock.call(broker, newest=True,
params={'states': 'updating',
'marker': 'where', 'end_marker': ''})])
sharder._replicate_object.assert_has_calls(
[mock.call(0, expected_shard_dbs[-1], 0)],
)
expected_stats = {'attempted': 1, 'success': 0, 'failure': 1,
'found': 1, 'placed': 1, 'unplaced': 2}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
warning_lines = sharder.logger.get_lines_for_level('warning')
self.assertIn(
'Failed to find destination for at least 2 misplaced objects',
warning_lines[0])
self.assertFalse(warning_lines[1:])
sharder.logger.clear()
# check some misplaced objects were moved
self._check_objects(objects[5:], expected_shard_dbs[3])
# ... and removed from the source db
self._check_objects(objects[:5], broker.db_file)
self.assertFalse(os.path.exists(expected_shard_dbs[0]))
self.assertFalse(os.path.exists(expected_shard_dbs[1]))
self.assertFalse(os.path.exists(expected_shard_dbs[2]))
# normality resumes and all destinations are available
with self._mock_sharder() as sharder:
sharder._fetch_shard_ranges = mock.MagicMock(
return_value=root_shard_ranges)
sharder._move_misplaced_objects(broker)
sharder._fetch_shard_ranges.assert_has_calls(
[mock.call(broker, newest=True, params={'states': 'updating',
'marker': '',
'end_marker': 'here\x00'})]
)
sharder._replicate_object.assert_has_calls(
[mock.call(0, expected_shard_dbs[0], 0)],
)
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 1, 'placed': 2, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
# check misplaced objects were moved
self._check_objects(objects[:2], expected_shard_dbs[0])
self._check_objects(objects[5:], expected_shard_dbs[3])
# ... and removed from the source db
self._check_objects(objects[2:5], broker.db_file)
self.assertFalse(os.path.exists(expected_shard_dbs[1]))
self.assertFalse(os.path.exists(expected_shard_dbs[2]))
# pretend first shard has been cleaved
context = CleavingContext.load(broker)
context.cursor = 'there'
context.store(broker)
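# updates below the cursor now fall in already-cleaved ranges and will be
# treated as misplaced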
# and then more misplaced updates arrive
new_objects = [
['a', self.ts_encoded(), 51, 'text/plain', 'etag_a', 0, 0],
# this one is in the now cleaved shard range...
['k', self.ts_encoded(), 52, 'text/plain', 'etag_k', 0, 0],
['z', self.ts_encoded(), 53, 'text/plain', 'etag_z', 0, 0],
]
for obj in new_objects:
broker.put_object(*obj)
broker.get_info() # force updates to be committed
# sanity check the puts landed in the broker
self._check_objects(sorted(new_objects + objects[2:5]), broker.db_file)
with self._mock_sharder() as sharder:
sharder._fetch_shard_ranges = mock.MagicMock(
return_value=root_shard_ranges)
sharder._move_misplaced_objects(broker)
sharder._fetch_shard_ranges.assert_has_calls(
[mock.call(broker, newest=True,
params={'states': 'updating', 'marker': '',
'end_marker': 'there\x00'}),
mock.call(broker, newest=True,
params={'states': 'updating', 'marker': 'where',
'end_marker': ''})])
sharder._replicate_object.assert_has_calls(
[mock.call(0, db, 0) for db in (expected_shard_dbs[0],
expected_shard_dbs[1],
expected_shard_dbs[-1])],
any_order=True
)
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 1, 'placed': 5, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
# check *all* the misplaced objects were moved
self._check_objects(new_objects[:1] + objects[:2],
expected_shard_dbs[0])
self._check_objects(new_objects[1:2] + objects[2:4],
expected_shard_dbs[1])
self._check_objects(objects[5:] + new_objects[2:],
expected_shard_dbs[3])
# ... and removed from the source db
self._check_objects(objects[4:5], broker.db_file)
self.assertFalse(os.path.exists(expected_shard_dbs[2]))
def test_misplaced_objects_deleted_and_updated(self):
# setup
broker = self._make_broker()
broker.enable_sharding(next(self.ts_iter))
shard_bounds = (('', 'here'), ('here', ''))
root_shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.ACTIVE)
expected_shard_dbs = []
for sr in root_shard_ranges:
db_hash = hash_path(sr.account, sr.container)
expected_shard_dbs.append(
os.path.join(self.tempdir, 'sda', 'containers', '0',
db_hash[-3:], db_hash, db_hash + '.db'))
broker.merge_shard_ranges(root_shard_ranges)
self.assertTrue(broker.set_sharding_state())
ts_older_internal = self.ts_encoded() # used later
# put deleted objects into source
objects = [
['b', self.ts_encoded(), 0, '', '', 1, 0],
['x', self.ts_encoded(), 0, '', '', 1, 0]
]
for obj in objects:
broker.put_object(*obj)
broker.get_info()
self._check_objects(objects, broker.db_file) # sanity check
# pretend we cleaved all ranges - sharded state
self.assertTrue(broker.set_sharded_state())
with self._mock_sharder() as sharder:
sharder._move_misplaced_objects(broker)
sharder._replicate_object.assert_has_calls(
[mock.call(0, db, 0) for db in (expected_shard_dbs[0],
expected_shard_dbs[1])],
any_order=True
)
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 1, 'placed': 2, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
# check new misplaced objects were moved
self._check_objects(objects[:1], expected_shard_dbs[0])
self._check_objects(objects[1:], expected_shard_dbs[1])
# ... and removed from the source db
self._check_objects([], broker.db_file)
# update source db with older undeleted versions of same objects
old_objects = [
['b', ts_older_internal, 2, 'text/plain', 'etag_b', 0, 0],
['x', ts_older_internal, 4, 'text/plain', 'etag_x', 0, 0]
]
for obj in old_objects:
broker.put_object(*obj)
broker.get_info()
self._check_objects(old_objects, broker.db_file) # sanity check
with self._mock_sharder() as sharder:
sharder._move_misplaced_objects(broker)
sharder._replicate_object.assert_has_calls(
[mock.call(0, db, 0) for db in (expected_shard_dbs[0],
expected_shard_dbs[1])],
any_order=True
)
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
# check older misplaced objects were not merged to shard brokers
self._check_objects(objects[:1], expected_shard_dbs[0])
self._check_objects(objects[1:], expected_shard_dbs[1])
# ... and removed from the source db
self._check_objects([], broker.db_file)
# the destination shard dbs for misplaced objects may already exist so
# check they are updated correctly when overwriting objects
# update source db with newer deleted versions of same objects
new_objects = [
['b', self.ts_encoded(), 0, '', '', 1, 0],
['x', self.ts_encoded(), 0, '', '', 1, 0]
]
for obj in new_objects:
broker.put_object(*obj)
broker.get_info()
self._check_objects(new_objects, broker.db_file) # sanity check
shard_broker = ContainerBroker(
expected_shard_dbs[0], account=root_shard_ranges[0].account,
container=root_shard_ranges[0].container)
# update one shard container with even newer version of object
timestamps = [next(self.ts_iter) for i in range(7)]
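# encode_timestamps packs separate data, content-type and metadata
# timestamps into a single internal timestamp string; the puts below
# exercise merging of each component independently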
ts_newer = encode_timestamps(
timestamps[1], timestamps[3], timestamps[5])
newer_object = ('b', ts_newer, 10, 'text/plain', 'etag_b', 0, 0)
shard_broker.put_object(*newer_object)
with self._mock_sharder() as sharder:
sharder._move_misplaced_objects(broker)
sharder._replicate_object.assert_has_calls(
[mock.call(0, db, 0) for db in (expected_shard_dbs[0],
expected_shard_dbs[1])],
any_order=True
)
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
1, sharder.logger.statsd_client.get_stats_counts()[
'misplaced_found'])
# check only the newer misplaced object was moved
self._check_objects([newer_object], expected_shard_dbs[0])
self._check_objects(new_objects[1:], expected_shard_dbs[1])
# ... and removed from the source db
self._check_objects([], broker.db_file)
# update source with a version of 'b' that has newer data
# but older content-type and metadata relative to shard object
ts_update = encode_timestamps(
timestamps[2], timestamps[3], timestamps[4])
update_object = ('b', ts_update, 20, 'text/ignored', 'etag_newer', 0,
0)
broker.put_object(*update_object)
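# on merge each timestamp component wins independently: the update's
# newer data timestamp brings its size and etag, while the shard row
# keeps its content-type and metadata whose timestamps are not older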
with self._mock_sharder() as sharder:
sharder._move_misplaced_objects(broker)
ts_expected = encode_timestamps(
timestamps[2], timestamps[3], timestamps[5])
expected = ('b', ts_expected, 20, 'text/plain', 'etag_newer', 0, 0)
self._check_objects([expected], expected_shard_dbs[0])
self._check_objects([], broker.db_file)
# update source with a version of 'b' that has older data
# and content-type but newer metadata relative to shard object
ts_update = encode_timestamps(
timestamps[1], timestamps[3], timestamps[6])
update_object = ('b', ts_update, 999, 'text/ignored', 'etag_b', 0, 0)
broker.put_object(*update_object)
with self._mock_sharder() as sharder:
sharder._move_misplaced_objects(broker)
ts_expected = encode_timestamps(
timestamps[2], timestamps[3], timestamps[6])
expected = ('b', ts_expected, 20, 'text/plain', 'etag_newer', 0, 0)
self._check_objects([expected], expected_shard_dbs[0])
self._check_objects([], broker.db_file)
# update source with a version of 'b' that has older data
# but newer content-type and metadata
ts_update = encode_timestamps(
timestamps[2], timestamps[6], timestamps[6])
update_object = ('b', ts_update, 999, 'text/newer', 'etag_b', 0, 0)
broker.put_object(*update_object)
with self._mock_sharder() as sharder:
sharder._move_misplaced_objects(broker)
ts_expected = encode_timestamps(
timestamps[2], timestamps[6], timestamps[6])
expected = ('b', ts_expected, 20, 'text/newer', 'etag_newer', 0, 0)
self._check_objects([expected], expected_shard_dbs[0])
self._check_objects([], broker.db_file)
def _setup_old_style_find_ranges(self, account, cont, lower, upper):
broker = self._make_broker(account=account, container=cont)
own_sr = ShardRange('%s/%s' % (account, cont), Timestamp.now(),
lower, upper)
broker.merge_shard_ranges([own_sr])
broker.set_sharding_sysmeta('Root', 'a/c')
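# 'old style' here means the root path is recorded via the unquoted
# 'Root' sysmeta; _setup_find_ranges below uses 'Quoted-Root' instead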
objects = [
# 100 objects for the shard range scanner to count
['obj%3d' % i, self.ts_encoded(), i, 'text/plain', 'etag%s' % i, 0]
for i in range(100)]
for obj in objects:
broker.put_object(*obj)
return broker, objects
def _check_old_style_find_shard_ranges_none_found(self, broker, objects):
with self._mock_sharder() as sharder:
num_found = sharder._find_shard_ranges(broker)
self.assertGreater(sharder.rows_per_shard, len(objects))
self.assertEqual(0, num_found)
self.assertFalse(broker.get_shard_ranges())
expected_stats = {'attempted': 1, 'success': 0, 'failure': 1,
'found': 0, 'min_time': mock.ANY,
'max_time': mock.ANY}
stats = self._assert_stats(expected_stats, sharder, 'scanned')
self.assertGreaterEqual(stats['max_time'], stats['min_time'])
with self._mock_sharder(
conf={'shard_container_threshold': 200}) as sharder:
num_found = sharder._find_shard_ranges(broker)
self.assertEqual(sharder.rows_per_shard, len(objects))
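# rows_per_shard is half the threshold (200 // 2 == 100); the object
# count does not exceed it, so again no ranges are found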
self.assertEqual(0, num_found)
self.assertFalse(broker.get_shard_ranges())
expected_stats = {'attempted': 1, 'success': 0, 'failure': 1,
'found': 0, 'min_time': mock.ANY,
'max_time': mock.ANY}
stats = self._assert_stats(expected_stats, sharder, 'scanned')
self.assertGreaterEqual(stats['max_time'], stats['min_time'])
def test_old_style_find_shard_ranges_none_found_root(self):
broker, objects = self._setup_old_style_find_ranges('a', 'c', '', '')
self._check_old_style_find_shard_ranges_none_found(broker, objects)
def test_old_style_find_shard_ranges_none_found_shard(self):
broker, objects = self._setup_old_style_find_ranges(
'.shards_a', 'c', 'lower', 'upper')
self._check_old_style_find_shard_ranges_none_found(broker, objects)
def _check_old_style_find_shard_ranges_finds_two(
self, account, cont, lower, upper):
def check_ranges():
self.assertEqual(2, len(broker.get_shard_ranges()))
expected_ranges = [
ShardRange(
ShardRange.make_path('.int_shards_a', 'c', cont, now, 0),
now, lower, objects[98][0], 99),
ShardRange(
ShardRange.make_path('.int_shards_a', 'c', cont, now, 1),
now, objects[98][0], upper, 1),
]
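# with rows_per_shard == 99 the first range holds 99 rows and the final
# range holds the remaining 1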
self._assert_shard_ranges_equal(expected_ranges,
broker.get_shard_ranges())
# first invocation finds both ranges
broker, objects = self._setup_old_style_find_ranges(
account, cont, lower, upper)
with self._mock_sharder(conf={'shard_container_threshold': 199,
'minimum_shard_size': 1,
'shrink_threshold': 0,
'auto_create_account_prefix': '.int_'}
) as sharder:
with mock_timestamp_now() as now:
num_found = sharder._find_shard_ranges(broker)
self.assertEqual(99, sharder.rows_per_shard)
self.assertEqual(2, num_found)
check_ranges()
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 2, 'min_time': mock.ANY,
'max_time': mock.ANY}
stats = self._assert_stats(expected_stats, sharder, 'scanned')
self.assertGreaterEqual(stats['max_time'], stats['min_time'])
# second invocation finds none
with self._mock_sharder(conf={'shard_container_threshold': 199,
'minimum_shard_size': 1,
'shrink_threshold': 0,
'auto_create_account_prefix': '.int_'}
) as sharder:
num_found = sharder._find_shard_ranges(broker)
self.assertEqual(0, num_found)
self.assertEqual(2, len(broker.get_shard_ranges()))
check_ranges()
expected_stats = {'attempted': 0, 'success': 0, 'failure': 0,
'found': 0, 'min_time': mock.ANY,
'max_time': mock.ANY}
stats = self._assert_stats(expected_stats, sharder, 'scanned')
self.assertGreaterEqual(stats['max_time'], stats['min_time'])
def test_old_style_find_shard_ranges_finds_two_root(self):
self._check_old_style_find_shard_ranges_finds_two('a', 'c', '', '')
def test_old_style_find_shard_ranges_finds_two_shard(self):
self._check_old_style_find_shard_ranges_finds_two(
'.shards_a', 'c_', 'l', 'u')
def _setup_find_ranges(self, account, cont, lower, upper):
broker = self._make_broker(account=account, container=cont)
own_sr = ShardRange('%s/%s' % (account, cont), Timestamp.now(),
lower, upper)
broker.merge_shard_ranges([own_sr])
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
objects = [
# 100 objects for the shard range scanner to count
['obj%3d' % i, self.ts_encoded(), i, 'text/plain', 'etag%s' % i, 0]
for i in range(100)]
for obj in objects:
broker.put_object(*obj)
return broker, objects
def _check_find_shard_ranges_none_found(self, broker, objects):
with self._mock_sharder() as sharder:
num_found = sharder._find_shard_ranges(broker)
self.assertGreater(sharder.rows_per_shard, len(objects))
self.assertEqual(0, num_found)
self.assertFalse(broker.get_shard_ranges())
expected_stats = {'attempted': 1, 'success': 0, 'failure': 1,
'found': 0, 'min_time': mock.ANY,
'max_time': mock.ANY}
stats = self._assert_stats(expected_stats, sharder, 'scanned')
self.assertGreaterEqual(stats['max_time'], stats['min_time'])
with self._mock_sharder(
conf={'shard_container_threshold': 200}) as sharder:
num_found = sharder._find_shard_ranges(broker)
self.assertEqual(sharder.rows_per_shard, len(objects))
self.assertEqual(0, num_found)
self.assertFalse(broker.get_shard_ranges())
expected_stats = {'attempted': 1, 'success': 0, 'failure': 1,
'found': 0, 'min_time': mock.ANY,
'max_time': mock.ANY}
stats = self._assert_stats(expected_stats, sharder, 'scanned')
self.assertGreaterEqual(stats['max_time'], stats['min_time'])
def test_find_shard_ranges_none_found_root(self):
broker, objects = self._setup_find_ranges('a', 'c', '', '')
self._check_find_shard_ranges_none_found(broker, objects)
def test_find_shard_ranges_none_found_shard(self):
broker, objects = self._setup_find_ranges(
'.shards_a', 'c', 'lower', 'upper')
self._check_find_shard_ranges_none_found(broker, objects)
def _check_find_shard_ranges_finds_two(self, account, cont, lower, upper):
def check_ranges():
self.assertEqual(2, len(broker.get_shard_ranges()))
expected_ranges = [
ShardRange(
ShardRange.make_path('.int_shards_a', 'c', cont, now, 0),
now, lower, objects[98][0], 99),
ShardRange(
ShardRange.make_path('.int_shards_a', 'c', cont, now, 1),
now, objects[98][0], upper, 1),
]
self._assert_shard_ranges_equal(expected_ranges,
broker.get_shard_ranges())
# first invocation finds both ranges, sizes 99 and 1
broker, objects = self._setup_find_ranges(
account, cont, lower, upper)
with self._mock_sharder(conf={'shard_container_threshold': 199,
'minimum_shard_size': 1,
'shrink_threshold': 0,
'auto_create_account_prefix': '.int_'}
) as sharder:
with mock_timestamp_now() as now:
num_found = sharder._find_shard_ranges(broker)
self.assertEqual(99, sharder.rows_per_shard)
self.assertEqual(2, num_found)
check_ranges()
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 2, 'min_time': mock.ANY,
'max_time': mock.ANY}
stats = self._assert_stats(expected_stats, sharder, 'scanned')
self.assertGreaterEqual(stats['max_time'], stats['min_time'])
# second invocation finds none
with self._mock_sharder(conf={'shard_container_threshold': 199,
'auto_create_account_prefix': '.int_'}
) as sharder:
num_found = sharder._find_shard_ranges(broker)
self.assertEqual(0, num_found)
self.assertEqual(2, len(broker.get_shard_ranges()))
check_ranges()
expected_stats = {'attempted': 0, 'success': 0, 'failure': 0,
'found': 0, 'min_time': mock.ANY,
'max_time': mock.ANY}
stats = self._assert_stats(expected_stats, sharder, 'scanned')
self.assertGreaterEqual(stats['max_time'], stats['min_time'])
def test_find_shard_ranges_finds_two_root(self):
self._check_find_shard_ranges_finds_two('a', 'c', '', '')
def test_find_shard_ranges_finds_two_shard(self):
self._check_find_shard_ranges_finds_two('.shards_a', 'c_', 'l', 'u')
def _check_find_shard_ranges_finds_three(self, account, cont, lower,
upper):
broker, objects = self._setup_find_ranges(
account, cont, lower, upper)
now = Timestamp.now()
expected_ranges = [
ShardRange(
ShardRange.make_path('.shards_a', 'c', cont, now, 0),
now, lower, objects[44][0], 45),
ShardRange(
ShardRange.make_path('.shards_a', 'c', cont, now, 1),
now, objects[44][0], objects[89][0], 45),
ShardRange(
ShardRange.make_path('.shards_a', 'c', cont, now, 2),
now, objects[89][0], upper, 10),
]
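# threshold 90 gives rows_per_shard 45, so the 100 rows split 45/45/10;
# shard_scanner_batch_size 2 limits each pass to finding two ranges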
# first invocation finds 2 ranges
# (the third shard range will be >= minimum_shard_size)
with self._mock_sharder(
conf={'shard_container_threshold': 90,
'shard_scanner_batch_size': 2,
'minimum_shard_size': 10}) as sharder:
with mock_timestamp_now(now):
num_found = sharder._find_shard_ranges(broker)
self.assertEqual(45, sharder.rows_per_shard)
self.assertEqual(2, num_found)
self.assertEqual(2, len(broker.get_shard_ranges()))
self._assert_shard_ranges_equal(expected_ranges[:2],
broker.get_shard_ranges())
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 2, 'min_time': mock.ANY,
'max_time': mock.ANY}
stats = self._assert_stats(expected_stats, sharder, 'scanned')
self.assertGreaterEqual(stats['max_time'], stats['min_time'])
# second invocation finds third shard range
with self._mock_sharder(conf={'shard_container_threshold': 90,
'shard_scanner_batch_size': 2,
'minimum_shard_size': 10}
) as sharder:
with mock_timestamp_now(now):
num_found = sharder._find_shard_ranges(broker)
self.assertEqual(1, num_found)
self.assertEqual(3, len(broker.get_shard_ranges()))
self._assert_shard_ranges_equal(expected_ranges,
broker.get_shard_ranges())
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 1, 'min_time': mock.ANY,
'max_time': mock.ANY}
stats = self._assert_stats(expected_stats, sharder, 'scanned')
self.assertGreaterEqual(stats['max_time'], stats['min_time'])
# third invocation finds none
with self._mock_sharder(conf={'shard_container_threshold': 199,
'shard_scanner_batch_size': 2,
'shrink_threshold': 0,
'minimum_shard_size': 10}
) as sharder:
sharder._send_shard_ranges = mock.MagicMock(return_value=True)
num_found = sharder._find_shard_ranges(broker)
self.assertEqual(0, num_found)
self.assertEqual(3, len(broker.get_shard_ranges()))
self._assert_shard_ranges_equal(expected_ranges,
broker.get_shard_ranges())
expected_stats = {'attempted': 0, 'success': 0, 'failure': 0,
'found': 0, 'min_time': mock.ANY,
'max_time': mock.ANY}
stats = self._assert_stats(expected_stats, sharder, 'scanned')
self.assertGreaterEqual(stats['max_time'], stats['min_time'])
def test_find_shard_ranges_finds_three_root(self):
self._check_find_shard_ranges_finds_three('a', 'c', '', '')
def test_find_shard_ranges_finds_three_shard(self):
self._check_find_shard_ranges_finds_three('.shards_a', 'c_', 'l', 'u')
def test_find_shard_ranges_with_minimum_size(self):
cont = 'c_'
lower = 'l'
upper = 'u'
broker, objects = self._setup_find_ranges(
'.shards_a', cont, lower, upper)
now = Timestamp.now()
expected_ranges = [
ShardRange(
ShardRange.make_path('.shards_a', 'c', cont, now, 0),
now, lower, objects[44][0], 45),
ShardRange(
ShardRange.make_path('.shards_a', 'c', cont, now, 1),
now, objects[44][0], upper, 55),
]
# first invocation finds 2 ranges - the would-be final range of 10 rows is
# below minimum_shard_size (11) so it is merged into the second range
with self._mock_sharder(
conf={'shard_container_threshold': 90,
'shard_scanner_batch_size': 2,
'minimum_shard_size': 11}) as sharder:
with mock_timestamp_now(now):
num_found = sharder._find_shard_ranges(broker)
self.assertEqual(45, sharder.rows_per_shard)
self.assertEqual(11, sharder.minimum_shard_size)
self.assertEqual(2, num_found)
self.assertEqual(2, len(broker.get_shard_ranges()))
self._assert_shard_ranges_equal(expected_ranges[:2],
broker.get_shard_ranges())
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 2, 'min_time': mock.ANY,
'max_time': mock.ANY}
stats = self._assert_stats(expected_stats, sharder, 'scanned')
self.assertGreaterEqual(stats['max_time'], stats['min_time'])
def test_sharding_enabled(self):
broker = self._make_broker()
self.assertFalse(sharding_enabled(broker))
# setting the sharding sysmeta to a true value enables sharding
broker.update_metadata(
{'X-Container-Sysmeta-Sharding':
('yes', Timestamp.now().internal)})
self.assertTrue(sharding_enabled(broker))
# deleting broker doesn't clear the Sysmeta-Sharding sysmeta
broker.delete_db(Timestamp.now().internal)
self.assertTrue(sharding_enabled(broker))
# re-init the deleted DB for the remaining checks
broker.set_storage_policy_index(0, Timestamp.now().internal)
broker.update_metadata(
{'X-Container-Sysmeta-Sharding':
('yes', Timestamp.now().internal)})
self.assertTrue(sharding_enabled(broker))
# if the Sysmeta-Sharding is falsy value then sharding isn't enabled
for value in ('', 'no', 'false', 'some_fish'):
broker.update_metadata(
{'X-Container-Sysmeta-Sharding':
(value, Timestamp.now().internal)})
self.assertFalse(sharding_enabled(broker))
# deleting broker doesn't clear the Sysmeta-Sharding sysmeta
broker.delete_db(Timestamp.now().internal)
self.assertEqual(broker.metadata['X-Container-Sysmeta-Sharding'][0],
'some_fish')
# so it still isn't enabled (some_fish isn't a true value).
self.assertFalse(sharding_enabled(broker))
# but if broker has a shard range then sharding is enabled
broker.merge_shard_ranges(
ShardRange('acc/a_shard', Timestamp.now(), 'l', 'u'))
self.assertTrue(sharding_enabled(broker))
def test_send_shard_ranges(self):
broker = self._make_broker()
shard_ranges = self._make_shard_ranges((('', 'h'), ('h', '')))
def do_test(replicas, *resp_codes):
sent_data = defaultdict(bytes)
def on_send(fake_conn, data):
sent_data[fake_conn] += data
with self._mock_sharder(replicas=replicas) as sharder:
with mocked_http_conn(*resp_codes, give_send=on_send) as conn:
with mock_timestamp_now() as now:
res = sharder._send_shard_ranges(
broker, 'a', 'c', shard_ranges)
self.assertEqual(sharder.ring.replica_count, len(conn.requests))
expected_body = json.dumps([dict(sr) for sr in shard_ranges])
expected_body = expected_body.encode('ascii')
expected_headers = {'Content-Type': 'application/json',
'Content-Length': str(len(expected_body)),
'X-Timestamp': now.internal,
'X-Backend-Record-Type': 'shard',
'User-Agent': mock.ANY}
for data in sent_data.values():
self.assertEqual(expected_body, data)
hosts = set()
for req in conn.requests:
path_parts = req['path'].split('/')[1:]
hosts.add('%s:%s/%s' % (req['ip'], req['port'], path_parts[0]))
# FakeRing only has one partition
self.assertEqual('0', path_parts[1])
self.assertEqual('PUT', req['method'])
self.assertEqual(['a', 'c'], path_parts[-2:])
req_headers = req['headers']
for k, v in expected_headers.items():
self.assertEqual(v, req_headers[k])
self.assertTrue(
req_headers['User-Agent'].startswith('container-sharder'))
self.assertEqual(sharder.ring.replica_count, len(hosts))
return res, sharder, hosts
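# the cases below imply that _send_shard_ranges reports success when at
# least a majority ((replicas + 1) // 2) of the shard range PUTs get a
# success response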
replicas = 3
res, sharder, _ = do_test(replicas, 202, 202, 202)
self.assertTrue(res)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
res, sharder, _ = do_test(replicas, 202, 202, 404)
self.assertTrue(res)
self.assertEqual([True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertEqual([True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
res, sharder, _ = do_test(replicas, 202, 202, Exception)
self.assertTrue(res)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertEqual([True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
self.assertEqual([True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('error')])
res, sharder, _ = do_test(replicas, 202, 404, 404)
self.assertFalse(res)
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertEqual([True, True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
res, sharder, hosts = do_test(replicas, 500, 500, 500)
self.assertFalse(res)
self.assertEqual(set(
'Failed to put shard ranges to %s a/c: 500, path: a/c, db: %s' %
(host, broker.db_file) for host in hosts),
set(sharder.logger.get_lines_for_level('warning')))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
res, sharder, _ = do_test(replicas, Exception, Exception, 202)
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
self.assertEqual([True, True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('error')])
res, sharder, _ = do_test(replicas, Exception, eventlet.Timeout(), 202)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
self.assertEqual([True, True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('error')])
replicas = 2
res, sharder, _ = do_test(replicas, 202, 202)
self.assertTrue(res)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
res, sharder, _ = do_test(replicas, 202, 404)
self.assertTrue(res)
self.assertEqual([True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertEqual([True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
res, sharder, _ = do_test(replicas, 202, Exception)
self.assertTrue(res)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertEqual([True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
self.assertEqual([True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('error')])
res, sharder, _ = do_test(replicas, 404, 404)
self.assertFalse(res)
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertEqual([True, True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
res, sharder, hosts = do_test(replicas, Exception, Exception)
self.assertFalse(res)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertEqual(set(
'Failed to put shard ranges to %s a/c: FakeStatus Error, '
'path: a/c, db: %s: ' % (host, broker.db_file) for host in hosts),
set(sharder.logger.get_lines_for_level('error')))
res, sharder, _ = do_test(replicas, eventlet.Timeout(), Exception)
self.assertFalse(res)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
self.assertEqual([True, True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('error')])
replicas = 4
res, sharder, _ = do_test(replicas, 202, 202, 202, 202)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
self.assertTrue(res)
res, sharder, _ = do_test(replicas, 202, 202, 404, 404)
self.assertTrue(res)
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertEqual([True, True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
res, sharder, _ = do_test(replicas, 202, 202, Exception, Exception)
self.assertTrue(res)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
self.assertEqual([True, True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('error')])
res, sharder, _ = do_test(replicas, 202, 404, 404, 404)
self.assertFalse(res)
self.assertEqual([True, True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertEqual([True, True, True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
res, sharder, _ = do_test(replicas, 500, 500, 500, 202)
self.assertFalse(res)
self.assertEqual([True, True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertEqual([True, True, True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
res, sharder, _ = do_test(replicas, Exception, Exception, 202, 404)
self.assertFalse(res)
self.assertEqual([True], [
all(msg in line for msg in ('Failed to put shard ranges', '404'))
for line in sharder.logger.get_lines_for_level('warning')])
self.assertEqual([True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
self.assertEqual([True, True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('error')])
res, sharder, _ = do_test(
replicas, eventlet.Timeout(), eventlet.Timeout(), 202, 404)
self.assertFalse(res)
self.assertEqual([True], [
all(msg in line for msg in ('Failed to put shard ranges', '404'))
for line in sharder.logger.get_lines_for_level('warning')])
self.assertEqual([True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('warning')])
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
self.assertEqual([True, True], [
'path: a/c, db: %s' % broker.db_file in line for line in
sharder.logger.get_lines_for_level('error')])
def test_process_broker_not_sharding_no_others(self):
# verify that sharding process will not start when own shard range is
# missing or in wrong state or there are no other shard ranges
broker = self._make_broker()
node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda5', 'id': '2',
'index': 0}
# sanity check
self.assertIsNone(broker.get_own_shard_range(no_default=True))
self.assertEqual(UNSHARDED, broker.get_db_state())
# no own shard range
with self._mock_sharder() as sharder:
sharder._process_broker(broker, node, 99)
self.assertIsNone(broker.get_own_shard_range(no_default=True))
self.assertEqual(UNSHARDED, broker.get_db_state())
self.assertFalse(broker.logger.get_lines_for_level('warning'))
self.assertFalse(broker.logger.get_lines_for_level('error'))
broker.logger.clear()
# now add own shard range
for state in sorted(ShardRange.STATES):
own_sr = broker.get_own_shard_range() # returns the default
own_sr.update_state(state)
broker.merge_shard_ranges([own_sr])
with mock.patch.object(
broker, 'set_sharding_state') as mock_set_sharding_state:
with self._mock_sharder() as sharder:
with mock_timestamp_now():
with mock.patch.object(sharder, '_audit_container'):
sharder._process_broker(broker, node, 99)
own_shard_range = broker.get_own_shard_range(
no_default=True)
mock_set_sharding_state.assert_not_called()
self.assertEqual(dict(own_sr), dict(own_shard_range))
self.assertEqual(UNSHARDED, broker.get_db_state())
self.assertFalse(broker.logger.get_lines_for_level('warning'))
self.assertFalse(broker.logger.get_lines_for_level('error'))
broker.logger.clear()
def _check_process_broker_sharding_others(self, start_state, deleted):
# verify that when existing own_shard_range has given state and there
# are other shard ranges then the sharding process will complete
broker = self._make_broker(hash_='hash%s%s' % (start_state, deleted))
node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda5', 'id': '2',
'index': 0}
own_sr = broker.get_own_shard_range()
self.assertTrue(own_sr.update_state(start_state))
epoch = next(self.ts_iter)
own_sr.epoch = epoch
shard_ranges = self._make_shard_ranges((('', 'm'), ('m', '')))
broker.merge_shard_ranges([own_sr] + shard_ranges)
if deleted:
broker.delete_db(next(self.ts_iter).internal)
with self._mock_sharder() as sharder:
# pretend shard containers are created ok so sharding proceeds
with mock.patch.object(
sharder, '_send_shard_ranges', return_value=True):
with mock_timestamp_now_with_iter(self.ts_iter):
sharder._audit_container = mock.MagicMock()
sharder._process_broker(broker, node, 99)
final_own_sr = broker.get_own_shard_range(no_default=True)
self.assertEqual(SHARDED, broker.get_db_state())
self.assertEqual(epoch.normal, parse_db_filename(broker.db_file)[1])
lines = broker.logger.get_lines_for_level('info')
self.assertIn('Completed creating 2 shard range containers, '
'path: a/c, db: %s' % broker.db_file, lines)
self.assertFalse(broker.logger.get_lines_for_level('warning'))
self.assertFalse(broker.logger.get_lines_for_level('error'))
self.assertEqual(deleted, broker.is_deleted())
return own_sr, final_own_sr
def test_process_broker_sharding_completes_with_own_and_other_ranges(self):
own_sr, final_own_sr = self._check_process_broker_sharding_others(
ShardRange.SHARDING, False)
exp_own_sr = dict(own_sr, state=ShardRange.SHARDED,
meta_timestamp=mock.ANY)
self.assertEqual(exp_own_sr, dict(final_own_sr))
# verify that deleted DBs will be sharded
own_sr, final_own_sr = self._check_process_broker_sharding_others(
ShardRange.SHARDING, True)
exp_own_sr = dict(own_sr, state=ShardRange.SHARDED,
meta_timestamp=mock.ANY)
self.assertEqual(exp_own_sr, dict(final_own_sr))
own_sr, final_own_sr = self._check_process_broker_sharding_others(
ShardRange.SHRINKING, False)
exp_own_sr = dict(own_sr, state=ShardRange.SHRUNK,
meta_timestamp=mock.ANY)
self.assertEqual(exp_own_sr, dict(final_own_sr))
# verify that deleted DBs will be shrunk
own_sr, final_own_sr = self._check_process_broker_sharding_others(
ShardRange.SHRINKING, True)
exp_own_sr = dict(own_sr, state=ShardRange.SHRUNK,
meta_timestamp=mock.ANY)
self.assertEqual(exp_own_sr, dict(final_own_sr))
def test_process_broker_not_sharding_others(self):
# verify that sharding process will not start when own shard range is
# missing or in wrong state even when other shard ranges are in the db
broker = self._make_broker()
node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda5', 'id': '2',
'index': 0}
# sanity check
self.assertIsNone(broker.get_own_shard_range(no_default=True))
self.assertEqual(UNSHARDED, broker.get_db_state())
# add shard ranges - but not own
shard_ranges = self._make_shard_ranges((('', 'h'), ('h', '')))
broker.merge_shard_ranges(shard_ranges)
with self._mock_sharder() as sharder:
sharder._process_broker(broker, node, 99)
self.assertIsNone(broker.get_own_shard_range(no_default=True))
self.assertEqual(UNSHARDED, broker.get_db_state())
self.assertFalse(broker.logger.get_lines_for_level('warning'))
self.assertFalse(broker.logger.get_lines_for_level('error'))
broker.logger.clear()
# now add own shard range
for state in sorted(ShardRange.STATES):
if state in (ShardRange.SHARDING,
ShardRange.SHRINKING,
ShardRange.SHARDED,
ShardRange.SHRUNK):
epoch = None
else:
epoch = Timestamp.now()
own_sr = broker.get_own_shard_range() # returns the default
own_sr.update_state(state)
own_sr.epoch = epoch
broker.merge_shard_ranges([own_sr])
with self._mock_sharder() as sharder:
with mock_timestamp_now():
sharder._process_broker(broker, node, 99)
own_shard_range = broker.get_own_shard_range(
no_default=True)
self.assertEqual(dict(own_sr), dict(own_shard_range))
self.assertEqual(UNSHARDED, broker.get_db_state())
if epoch:
self.assertFalse(broker.logger.get_lines_for_level('warning'))
else:
self.assertIn('missing epoch',
broker.logger.get_lines_for_level('warning')[0])
self.assertFalse(broker.logger.get_lines_for_level('error'))
broker.logger.clear()
def _check_process_broker_sharding_stalls_others(self, state):
# verify states in which own_shard_range will cause sharding
# process to start when other shard ranges are in the db, but stop
# when shard containers have not been created
broker = self._make_broker(hash_='hash%s' % state)
node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda5', 'id': '2',
'index': 0}
# add shard ranges - but not own
shard_ranges = self._make_shard_ranges((('', 'h'), ('h', '')))
broker.merge_shard_ranges(shard_ranges)
# sanity check
self.assertIsNone(broker.get_own_shard_range(no_default=True))
self.assertEqual(UNSHARDED, broker.get_db_state())
# now set own shard range to given state and persist it
own_sr = broker.get_own_shard_range() # returns the default
self.assertTrue(own_sr.update_state(state))
epoch = Timestamp.now()
own_sr.epoch = epoch
broker.merge_shard_ranges([own_sr])
with self._mock_sharder() as sharder:
with mock_timestamp_now():
# we're not testing the rest of the process here so prevent any
# attempt to progress shard range states
sharder._create_shard_containers = lambda *args: 0
sharder._process_broker(broker, node, 99)
own_shard_range = broker.get_own_shard_range(no_default=True)
self.assertEqual(dict(own_sr), dict(own_shard_range))
self.assertEqual(SHARDING, broker.get_db_state())
self.assertEqual(epoch.normal, parse_db_filename(broker.db_file)[1])
self.assertFalse(broker.logger.get_lines_for_level('warning'))
self.assertFalse(broker.logger.get_lines_for_level('error'))
def test_process_broker_sharding_stalls_with_own_and_other_ranges(self):
self._check_process_broker_sharding_stalls_others(ShardRange.SHARDING)
self._check_process_broker_sharding_stalls_others(ShardRange.SHRINKING)
self._check_process_broker_sharding_stalls_others(ShardRange.SHARDED)
def test_process_broker_leader_auto_shard(self):
# verify conditions for acting as auto-shard leader
broker = self._make_broker(put_timestamp=next(self.ts_iter).internal)
objects = [
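# each row provides the put_object args: name, timestamp, size,
# content_type, etag, deleted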
['obj%3d' % i, self.ts_encoded(), i, 'text/plain',
'etag%s' % i, 0] for i in range(10)]
for obj in objects:
broker.put_object(*obj)
self.assertEqual(10, broker.get_info()['object_count'])
node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda5', 'id': '2',
'index': 0}
def do_process(conf):
with self._mock_sharder(conf) as sharder:
with mock_timestamp_now():
# we're not testing the rest of the process here so prevent any
# attempt to progress shard range states
sharder._create_shard_containers = lambda *args: 0
sharder._process_broker(broker, node, 99)
# auto shard disabled
conf = {'shard_container_threshold': 10,
'rows_per_shard': 5,
'shrink_threshold': 1,
'auto_shard': False}
do_process(conf)
self.assertEqual(UNSHARDED, broker.get_db_state())
own_sr = broker.get_own_shard_range(no_default=True)
self.assertIsNone(own_sr)
# auto shard enabled, not node 0
conf['auto_shard'] = True
node['index'] = 1
do_process(conf)
self.assertEqual(UNSHARDED, broker.get_db_state())
own_sr = broker.get_own_shard_range(no_default=True)
self.assertIsNone(own_sr)
# auto shard enabled, node 0 -> start sharding
node['index'] = 0
do_process(conf)
self.assertEqual(SHARDING, broker.get_db_state())
own_sr = broker.get_own_shard_range(no_default=True)
self.assertIsNotNone(own_sr)
self.assertEqual(ShardRange.SHARDING, own_sr.state)
self.assertEqual(own_sr.epoch.normal,
parse_db_filename(broker.db_file)[1])
self.assertEqual(2, len(broker.get_shard_ranges()))
def test_process_broker_leader_auto_shard_deleted_db(self):
# verify no auto-shard leader if broker is deleted
conf = {'shard_container_threshold': 10,
'rows_per_shard': 5,
'shrink_threshold': 1,
'auto_shard': True}
broker = self._make_broker(put_timestamp=next(self.ts_iter).internal)
broker.delete_db(next(self.ts_iter).internal)
self.assertTrue(broker.is_deleted()) # sanity check
node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda5', 'id': '2',
'index': 0}
with self._mock_sharder(conf) as sharder:
with mock_timestamp_now():
with mock.patch.object(
sharder, '_find_and_enable_sharding_candidates'
) as mock_find_and_enable:
sharder._process_broker(broker, node, 99)
self.assertEqual(UNSHARDED, broker.get_db_state())
own_sr = broker.get_own_shard_range(no_default=True)
self.assertIsNone(own_sr)
# this is the only concrete assertion that verifies the leader actions
# are not taken; no shard ranges would actually be found for an empty
# deleted db so there's no other way to differentiate from an undeleted
# db being processed...
mock_find_and_enable.assert_not_called()
def check_shard_ranges_sent(self, broker, expected_sent):
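# Helper: verify that _update_root_container PUTs the expected_sent shard
# ranges to all three root replicas over the replication network and that
# the broker's own shard range is then marked as reported.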
bodies = []
servers = []
referers = []
def capture_send(conn, data):
bodies.append(data)
def capture_connect(host, port, _method, _path, headers, *a, **kw):
servers.append((host, port))
referers.append(headers.get('Referer'))
self.assertFalse(broker.get_own_shard_range().reported) # sanity
with self._mock_sharder() as sharder:
with mocked_http_conn(204, 204, 204,
give_send=capture_send,
give_connect=capture_connect) as mock_conn:
sharder._update_root_container(broker)
for req in mock_conn.requests:
self.assertEqual('PUT', req['method'])
self.assertEqual([expected_sent] * 3,
[json.loads(b) for b in bodies])
self.assertEqual(servers, [
# NB: replication interfaces
('10.0.1.0', 1100),
('10.0.1.1', 1101),
('10.0.1.2', 1102),
])
self.assertEqual([broker.path] * 3, referers)
self.assertTrue(broker.get_own_shard_range().reported)
def test_update_root_container_own_range(self):
broker = self._make_broker()
obj_names = []
# nothing to send
with self._mock_sharder() as sharder:
with mocked_http_conn() as mock_conn:
sharder._update_root_container(broker)
self.assertFalse(mock_conn.requests)
def check_only_own_shard_range_sent(state):
own_shard_range = broker.get_own_shard_range()
self.assertTrue(own_shard_range.update_state(
state, state_timestamp=next(self.ts_iter)))
broker.merge_shard_ranges([own_shard_range])
# add an object, expect to see it reflected in the own shard range
# that is sent
obj_names.append(uuid4())
broker.put_object(str(obj_names[-1]),
next(self.ts_iter).internal, 1, '', '')
with mock_timestamp_now() as now:
# check if the state is in SHARD_UPDATE_STAT_STATES
if state in [ShardRange.CLEAVED, ShardRange.ACTIVE,
ShardRange.SHARDING, ShardRange.SHARDED,
ShardRange.SHRINKING, ShardRange.SHRUNK]:
exp_obj_count = len(obj_names)
expected_sent = [
dict(own_shard_range,
meta_timestamp=now.internal,
object_count=len(obj_names),
bytes_used=len(obj_names))]
else:
exp_obj_count = own_shard_range.object_count
expected_sent = [
dict(own_shard_range)]
self.check_shard_ranges_sent(broker, expected_sent)
self.assertEqual(
exp_obj_count, broker.get_own_shard_range().object_count)
# initialise tombstones
with mock_timestamp_now(next(self.ts_iter)):
own_shard_range = broker.get_own_shard_range()
own_shard_range.update_tombstones(0)
broker.merge_shard_ranges([own_shard_range])
for state in ShardRange.STATES:
with annotate_failure(state):
check_only_own_shard_range_sent(state)
init_obj_count = len(obj_names)
def check_tombstones_sent(state):
own_shard_range = broker.get_own_shard_range()
self.assertTrue(own_shard_range.update_state(
state, state_timestamp=next(self.ts_iter)))
broker.merge_shard_ranges([own_shard_range])
# delete an object, expect to see it reflected in the own shard
# range that is sent
broker.delete_object(str(obj_names.pop(-1)),
next(self.ts_iter).internal)
with mock_timestamp_now() as now:
# check if the state is in SHARD_UPDATE_STAT_STATES
if state in [ShardRange.CLEAVED, ShardRange.ACTIVE,
ShardRange.SHARDING, ShardRange.SHARDED,
ShardRange.SHRINKING, ShardRange.SHRUNK]:
expected_sent = [
dict(own_shard_range,
meta_timestamp=now.internal,
object_count=len(obj_names),
bytes_used=len(obj_names),
tombstones=init_obj_count - len(obj_names))]
else:
expected_sent = [
dict(own_shard_range)]
self.check_shard_ranges_sent(broker, expected_sent)
for i, state in enumerate(ShardRange.STATES):
with annotate_failure(state):
check_tombstones_sent(state)
def test_update_root_container_already_reported(self):
broker = self._make_broker()
def check_already_reported_not_sent(state):
own_shard_range = broker.get_own_shard_range()
own_shard_range.reported = True
self.assertTrue(own_shard_range.update_state(
state, state_timestamp=next(self.ts_iter)))
# Check that updating state clears the flag
self.assertFalse(own_shard_range.reported)
# If we claim to have already updated...
own_shard_range.reported = True
broker.merge_shard_ranges([own_shard_range])
# ... then there's nothing to send
with self._mock_sharder() as sharder:
with mocked_http_conn() as mock_conn:
sharder._update_root_container(broker)
self.assertFalse(mock_conn.requests)
# initialise tombstones
with mock_timestamp_now(next(self.ts_iter)):
own_shard_range = broker.get_own_shard_range()
own_shard_range.update_tombstones(0)
broker.merge_shard_ranges([own_shard_range])
for state in ShardRange.STATES:
with annotate_failure(state):
check_already_reported_not_sent(state)
def test_update_root_container_all_ranges(self):
broker = self._make_broker()
other_shard_ranges = self._make_shard_ranges((('', 'h'), ('h', '')))
self.assertTrue(other_shard_ranges[0].set_deleted())
broker.merge_shard_ranges(other_shard_ranges)
obj_names = []
# own range missing - send nothing
with self._mock_sharder() as sharder:
with mocked_http_conn() as mock_conn:
sharder._update_root_container(broker)
self.assertFalse(mock_conn.requests)
def check_all_shard_ranges_sent(state):
own_shard_range = broker.get_own_shard_range()
self.assertTrue(own_shard_range.update_state(
state, state_timestamp=next(self.ts_iter)))
broker.merge_shard_ranges([own_shard_range])
# add an object, expect to see it reflected in the own shard range
# that is sent
obj_names.append(uuid4())
broker.put_object(str(obj_names[-1]),
next(self.ts_iter).internal, 1, '', '')
with mock_timestamp_now() as now:
shard_ranges = broker.get_shard_ranges(include_deleted=True)
exp_own_shard_range = own_shard_range.copy()
# check if the state is in SHARD_UPDATE_STAT_STATES
if state in [ShardRange.CLEAVED, ShardRange.ACTIVE,
ShardRange.SHARDING, ShardRange.SHARDED,
ShardRange.SHRINKING, ShardRange.SHRUNK]:
exp_own_shard_range.object_count = len(obj_names)
exp_own_shard_range.bytes_used = len(obj_names)
exp_own_shard_range.meta_timestamp = now.internal
exp_own_shard_range.tombstones = 0
expected_sent = sorted(
[exp_own_shard_range] + shard_ranges,
key=lambda sr: (sr.upper, sr.state, sr.lower))
self.check_shard_ranges_sent(
broker, [dict(sr) for sr in expected_sent])
for state in ShardRange.STATES.keys():
with annotate_failure(state):
check_all_shard_ranges_sent(state)
def test_audit_root_container_reset_epoch(self):
epoch = next(self.ts_iter)
broker = self._make_broker(epoch=epoch.normal)
shard_bounds = (('', 'j'), ('j', 'k'), ('k', 's'),
('s', 'y'), ('y', ''))
shard_ranges = self._make_shard_ranges(shard_bounds,
ShardRange.ACTIVE,
timestamp=next(self.ts_iter))
broker.merge_shard_ranges(shard_ranges)
own_shard_range = broker.get_own_shard_range()
own_shard_range.update_state(ShardRange.SHARDED, next(self.ts_iter))
own_shard_range.epoch = epoch
broker.merge_shard_ranges(own_shard_range)
with self._mock_sharder() as sharder:
with mock.patch.object(
sharder, '_audit_shard_container') as mocked:
sharder._audit_container(broker)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
self._assert_stats({'attempted': 1, 'success': 1, 'failure': 0},
sharder, 'audit_root')
mocked.assert_not_called()
# test for a reset epoch
own_shard_range = broker.get_own_shard_range()
own_shard_range.epoch = None
own_shard_range.state_timestamp = next(self.ts_iter)
broker.merge_shard_ranges(own_shard_range)
with self._mock_sharder() as sharder:
with mock.patch.object(
sharder, '_audit_shard_container') as mocked:
sharder._audit_container(broker)
lines = sharder.logger.get_lines_for_level('warning')
self.assertIn("own_shard_range reset to None should be %s"
% broker.db_epoch, lines[0])
def test_audit_root_container(self):
broker = self._make_broker()
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'has_overlap': 0, 'num_overlap': 0}
with self._mock_sharder() as sharder:
with mock.patch.object(
sharder, '_audit_shard_container') as mocked:
sharder._audit_container(broker)
self._assert_stats(expected_stats, sharder, 'audit_root')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
mocked.assert_not_called()
def assert_overlap_warning(line, state_text):
self.assertIn('Audit failed for root', line)
self.assertIn(broker.db_file, line)
self.assertIn(broker.path, line)
self.assertIn(
'overlapping ranges in state %r: k-t s-y, y-z y-z'
% state_text, line)
# check for no duplicates in reversed order
self.assertNotIn('s-z k-t', line)
expected_stats = {'attempted': 1, 'success': 0, 'failure': 1,
'has_overlap': 1, 'num_overlap': 2}
shard_bounds = (('a', 'j'), ('k', 't'), ('s', 'y'),
('y', 'z'), ('y', 'z'))
for state, state_text in ShardRange.STATES.items():
if state in (ShardRange.SHRINKING,
ShardRange.SHARDED,
ShardRange.SHRUNK):
continue # tested separately below
shard_ranges = self._make_shard_ranges(
shard_bounds, state, timestamp=next(self.ts_iter))
broker.merge_shard_ranges(shard_ranges)
with self._mock_sharder() as sharder:
with mock.patch.object(
sharder, '_audit_shard_container') as mocked:
sharder._audit_container(broker)
lines = sharder.logger.get_lines_for_level('warning')
assert_overlap_warning(lines[0], state_text)
self.assertFalse(lines[1:])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
self._assert_stats(expected_stats, sharder, 'audit_root')
mocked.assert_not_called()
shard_ranges = self._make_shard_ranges(shard_bounds,
ShardRange.SHRINKING,
timestamp=next(self.ts_iter))
broker.merge_shard_ranges(shard_ranges)
with self._mock_sharder() as sharder:
with mock.patch.object(
sharder, '_audit_shard_container') as mocked:
sharder._audit_container(broker)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
self._assert_stats({'attempted': 1, 'success': 1, 'failure': 0,
'has_overlap': 0, 'num_overlap': 0},
sharder, 'audit_root')
mocked.assert_not_called()
for state in (ShardRange.SHRUNK, ShardRange.SHARDED):
shard_ranges = self._make_shard_ranges(
shard_bounds, state, timestamp=next(self.ts_iter))
for sr in shard_ranges:
sr.set_deleted(Timestamp.now())
broker.merge_shard_ranges(shard_ranges)
with self._mock_sharder() as sharder:
with mock.patch.object(
sharder, '_audit_shard_container') as mocked:
sharder._audit_container(broker)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
self._assert_stats({'attempted': 1, 'success': 1, 'failure': 0,
'has_overlap': 0, 'num_overlap': 0},
sharder, 'audit_root')
mocked.assert_not_called()
# Put the shards back to a "useful" state
shard_ranges = self._make_shard_ranges(shard_bounds,
ShardRange.ACTIVE,
timestamp=next(self.ts_iter))
broker.merge_shard_ranges(shard_ranges)
def assert_missing_warning(line):
self.assertIn('Audit failed for root', line)
self.assertIn('missing range(s): -a j-k z-', line)
self.assertIn('path: %s, db: %s' % (broker.path, broker.db_file),
line)
def check_missing():
own_shard_range = broker.get_own_shard_range()
states = (ShardRange.SHARDING, ShardRange.SHARDED)
for state in states:
own_shard_range.update_state(
state, state_timestamp=next(self.ts_iter))
broker.merge_shard_ranges([own_shard_range])
with self._mock_sharder() as sharder:
with mock.patch.object(
sharder, '_audit_shard_container') as mocked:
sharder._audit_container(broker)
lines = sharder.logger.get_lines_for_level('warning')
assert_missing_warning(lines[0])
assert_overlap_warning(lines[0], 'active')
self.assertFalse(lines[1:])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
self._assert_stats(expected_stats, sharder, 'audit_root')
mocked.assert_not_called()
check_missing()
# fill the gaps with shrinking shards and check that these are still
# reported as 'missing'
missing_shard_bounds = (('', 'a'), ('j', 'k'), ('z', ''))
shrinking_shard_ranges = self._make_shard_ranges(
missing_shard_bounds, ShardRange.SHRINKING,
timestamp=next(self.ts_iter))
broker.merge_shard_ranges(shrinking_shard_ranges)
check_missing()
def test_audit_root_container_with_parent_child_overlapping(self):
# Test '_audit_root_container' when the overlapping shard ranges are a
# parent and its children; expect no warnings. The case of non-parent-child
# overlaps is tested in 'test_audit_root_container'.
now_ts = next(self.ts_iter)
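# make past_ts just over the default reclaim_age (604800s, i.e. one week)
# earlier than now_ts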
past_ts = Timestamp(float(now_ts) - 604801)
root_sr = ShardRange('a/c', past_ts, state=ShardRange.SHARDED)
parent_range = ShardRange(ShardRange.make_path(
'.shards_a', 'c', root_sr.container,
past_ts, 0),
past_ts, 'a', 'f', object_count=1,
state=ShardRange.CLEAVED)
child_ranges = [
ShardRange(
ShardRange.make_path(
'.shards_a', 'c', parent_range.container, past_ts, 0),
past_ts, lower='a', upper='c', object_count=1,
state=ShardRange.CLEAVED),
ShardRange(
ShardRange.make_path(
'.shards_a', 'c', parent_range.container, past_ts, 1),
past_ts, lower='c', upper='f', object_count=1,
state=ShardRange.CLEAVED)]
self.assertTrue(find_overlapping_ranges([parent_range] + child_ranges))
broker = self._make_broker()
# The case of transient overlapping within reclaim_age.
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'has_overlap': 0, 'num_overlap': 0}
broker.merge_shard_ranges([parent_range] + child_ranges)
with mock.patch('swift.container.sharder.time.time',
return_value=float(now_ts) - 10):
with self._mock_sharder() as sharder:
with mock.patch.object(
sharder, '_audit_shard_container') as mocked:
sharder._audit_container(broker)
self._assert_stats(expected_stats, sharder, 'audit_root')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
mocked.assert_not_called()
# The case of overlapping past reclaim_age.
expected_stats = {'attempted': 1, 'success': 0, 'failure': 1,
'has_overlap': 1, 'num_overlap': 2}
with mock.patch('swift.container.sharder.time.time',
return_value=float(now_ts)):
with self._mock_sharder() as sharder:
with mock.patch.object(
sharder, '_audit_shard_container') as mocked:
sharder._audit_container(broker)
lines = sharder.logger.get_lines_for_level('warning')
self.assertIn('Audit failed for root', lines[0])
self.assertFalse(lines[1:])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
self._assert_stats(expected_stats, sharder, 'audit_root')
mocked.assert_not_called()
def test_audit_deleted_root_container(self):
broker = self._make_broker()
shard_bounds = (
('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e'), ('e', 'f'))
shard_ranges = self._make_shard_ranges(shard_bounds, ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.is_root_container())
with self._mock_sharder() as sharder:
sharder._audit_container(broker)
self.assertEqual([], self.logger.get_lines_for_level('warning'))
# delete it
delete_ts = next(self.ts_iter)
broker.delete_db(delete_ts.internal)
with self._mock_sharder() as sharder:
sharder._audit_container(broker)
self.assertEqual([], self.logger.get_lines_for_level('warning'))
# advance time
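# ... by ten weeks (6048000s), well past the reclaim age for the deleted db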
future_time = 6048000 + float(delete_ts)
with mock.patch(
'swift.container.sharder.time.time',
return_value=future_time), self._mock_sharder() as sharder:
sharder._audit_container(broker)
self.assertEqual(
['Reclaimable db stuck waiting for shrinking, path: %s, db: %s'
% (broker.path, broker.db_file)],
self.logger.get_lines_for_level('warning'))
# delete all shard ranges
for sr in shard_ranges:
sr.update_state(ShardRange.SHRUNK, Timestamp.now())
sr.deleted = True
sr.timestamp = Timestamp.now()
broker.merge_shard_ranges(shard_ranges)
# no more warning
with mock.patch(
'swift.container.sharder.time.time',
return_value=future_time), self._mock_sharder() as sharder:
sharder._audit_container(broker)
self.assertEqual([], self.logger.get_lines_for_level('warning'))
def call_audit_container(self, broker, shard_ranges, exc=None):
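# Helper: run _audit_container on the given broker with the internal client
# mocked so that a GET to the root returns the given shard_ranges (or raises
# exc); returns the sharder and the mocked internal client.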
with self._mock_sharder() as sharder:
with mock.patch.object(sharder, '_audit_root_container') \
as mocked, mock.patch.object(
sharder, 'int_client') as mock_swift:
mock_response = mock.MagicMock()
mock_response.headers = {'x-backend-record-type':
'shard'}
shard_ranges.sort(key=ShardRange.sort_key)
mock_response.body = json.dumps(
[dict(sr) for sr in shard_ranges])
mock_swift.make_request.return_value = mock_response
mock_swift.make_request.side_effect = exc
mock_swift.make_path = (lambda a, c:
'/v1/%s/%s' % (a, c))
sharder.reclaim_age = 0
sharder._audit_container(broker)
mocked.assert_not_called()
return sharder, mock_swift
def assert_no_audit_messages(self, sharder, mock_swift,
marker='k', end_marker='t'):
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0}
self._assert_stats(expected_stats, sharder, 'audit_shard')
expected_headers = {'X-Backend-Record-Type': 'shard',
'X-Newest': 'true',
'X-Backend-Include-Deleted': 'True',
'X-Backend-Override-Deleted': 'true'}
params = {'format': 'json', 'marker': marker, 'end_marker': end_marker,
'states': 'auditing'}
mock_swift.make_request.assert_called_once_with(
'GET', '/v1/a/c', expected_headers, acceptable_statuses=(2,),
params=params)
def _do_test_audit_shard_container(self, *args):
# include overlaps to verify correct match for updating own shard range
broker = self._make_broker(account='.shards_a', container='shard_c')
broker.set_sharding_sysmeta(*args)
shard_bounds = (
('a', 'j'), ('k', 't'), ('k', 'u'), ('l', 'v'), ('s', 'z'))
shard_states = (
ShardRange.ACTIVE, ShardRange.ACTIVE, ShardRange.ACTIVE,
ShardRange.FOUND, ShardRange.CREATED
)
shard_ranges = self._make_shard_ranges(shard_bounds, shard_states,
timestamp=next(self.ts_iter))
shard_ranges[1].name = broker.path
expected_stats = {'attempted': 1, 'success': 0, 'failure': 1}
# bad account name
broker.account = 'bad_account'
sharder, mock_swift = self.call_audit_container(broker, shard_ranges)
lines = sharder.logger.get_lines_for_level('warning')
self._assert_stats(expected_stats, sharder, 'audit_shard')
self.assertIn('Audit failed for shard', lines[0])
self.assertIn('missing own shard range', lines[0])
self.assertIn('path: %s, db: %s' % (broker.path, broker.db_file),
lines[0])
self.assertIn('Audit warnings for shard', lines[1])
self.assertIn('account not in shards namespace', lines[1])
self.assertIn('path: %s, db: %s' % (broker.path, broker.db_file),
lines[1])
self.assertNotIn('root has no matching shard range', lines[1])
self.assertNotIn('unable to get shard ranges from root', lines[1])
self.assertFalse(lines[2:])
self.assertFalse(broker.is_deleted())
# missing own shard range
broker.get_info()
sharder, mock_swift = self.call_audit_container(broker, shard_ranges)
lines = sharder.logger.get_lines_for_level('warning')
self._assert_stats(expected_stats, sharder, 'audit_shard')
self.assertIn('Audit failed for shard', lines[0])
self.assertIn('missing own shard range', lines[0])
self.assertIn('path: %s, db: %s' % (broker.path, broker.db_file),
lines[0])
self.assertNotIn('unable to get shard ranges from root', lines[0])
self.assertFalse(lines[1:])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
self.assertFalse(broker.is_deleted())
# own shard range bounds don't match what's in root (e.g. this shard is
# expanding to be an acceptor)
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0}
with mock_timestamp_now(next(self.ts_iter)):
own_shard_range = broker.get_own_shard_range() # get the default
own_shard_range.lower = 'j'
own_shard_range.upper = 'k'
own_shard_range.name = broker.path
broker.merge_shard_ranges([own_shard_range])
# bump timestamp of root shard range to be newer than own
root_ts = next(self.ts_iter)
self.assertTrue(shard_ranges[1].update_state(ShardRange.ACTIVE,
state_timestamp=root_ts))
shard_ranges[1].timestamp = root_ts
with mock_timestamp_now():
sharder, mock_swift = self.call_audit_container(
broker, shard_ranges)
self._assert_stats(expected_stats, sharder, 'audit_shard')
self.assertEqual(['Updating own shard range from root, path: '
'.shards_a/shard_c, db: %s' % broker.db_file],
sharder.logger.get_lines_for_level('debug'))
expected = shard_ranges[1].copy()
self.assertEqual(
['Updated own shard range from %s to %s, path: .shards_a/shard_c, '
'db: %s' % (own_shard_range, expected, broker.db_file)],
sharder.logger.get_lines_for_level('info'))
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
self.assertFalse(broker.is_deleted())
expected_headers = {'X-Backend-Record-Type': 'shard',
'X-Newest': 'true',
'X-Backend-Include-Deleted': 'True',
'X-Backend-Override-Deleted': 'true'}
params = {'format': 'json', 'marker': 'j', 'end_marker': 'k',
'states': 'auditing'}
mock_swift.make_request.assert_called_once_with(
'GET', '/v1/a/c', expected_headers, acceptable_statuses=(2,),
params=params)
# own shard range bounds are updated from root version
own_shard_range = broker.get_own_shard_range()
self.assertEqual(ShardRange.ACTIVE, own_shard_range.state)
self.assertEqual(root_ts, own_shard_range.state_timestamp)
self.assertEqual('k', own_shard_range.lower)
self.assertEqual('t', own_shard_range.upper)
# check other shard ranges from root are not merged (not shrinking)
self.assertEqual([own_shard_range],
broker.get_shard_ranges(include_own=True))
# move root version of own shard range to shrinking state
root_ts = next(self.ts_iter)
self.assertTrue(shard_ranges[1].update_state(ShardRange.SHRINKING,
state_timestamp=root_ts))
# bump own shard range state timestamp so it is newer than root
own_ts = next(self.ts_iter)
own_shard_range = broker.get_own_shard_range()
own_shard_range.update_state(ShardRange.ACTIVE, state_timestamp=own_ts)
broker.merge_shard_ranges([own_shard_range])
sharder, mock_swift = self.call_audit_container(broker, shard_ranges)
self._assert_stats(expected_stats, sharder, 'audit_shard')
self.assertEqual(['Updating own shard range from root, path: '
'.shards_a/shard_c, db: %s' % broker.db_file],
sharder.logger.get_lines_for_level('debug'))
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
self.assertFalse(broker.is_deleted())
expected_headers = {'X-Backend-Record-Type': 'shard',
'X-Newest': 'true',
'X-Backend-Include-Deleted': 'True',
'X-Backend-Override-Deleted': 'true'}
params = {'format': 'json', 'marker': 'k', 'end_marker': 't',
'states': 'auditing'}
mock_swift.make_request.assert_called_once_with(
'GET', '/v1/a/c', expected_headers, acceptable_statuses=(2,),
params=params)
# check own shard range bounds
own_shard_range = broker.get_own_shard_range()
# own shard range state has not changed (root is older)
self.assertEqual(ShardRange.ACTIVE, own_shard_range.state)
self.assertEqual(own_ts, own_shard_range.state_timestamp)
self.assertEqual('k', own_shard_range.lower)
self.assertEqual('t', own_shard_range.upper)
# reset own shard range bounds, failed response from root
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0}
own_shard_range = broker.get_own_shard_range() # get the default
own_shard_range.lower = 'j'
own_shard_range.upper = 'k'
own_shard_range.timestamp = next(self.ts_iter)
broker.merge_shard_ranges([own_shard_range])
sharder, mock_swift = self.call_audit_container(
broker, shard_ranges,
exc=internal_client.UnexpectedResponse('bad', 'resp'))
lines = sharder.logger.get_lines_for_level('warning')
self.assertIn('Failed to get shard ranges', lines[0])
self.assertIn('Audit warnings for shard', lines[1])
self.assertIn('path: %s, db: %s' % (broker.path, broker.db_file),
lines[1])
self.assertNotIn('account not in shards namespace', lines[1])
self.assertNotIn('missing own shard range', lines[1])
self.assertNotIn('root has no matching shard range', lines[1])
self.assertIn('unable to get shard ranges from root', lines[1])
self._assert_stats(expected_stats, sharder, 'audit_shard')
self.assertFalse(lines[2:])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
self.assertFalse(broker.is_deleted())
params = {'format': 'json', 'marker': 'j', 'end_marker': 'k',
'states': 'auditing'}
mock_swift.make_request.assert_called_once_with(
'GET', '/v1/a/c', expected_headers, acceptable_statuses=(2,),
params=params)
# make own shard range match one in root, but different state
own_ts = next(self.ts_iter)
shard_ranges[1].timestamp = own_ts
own_shard_range = shard_ranges[1].copy()
broker.merge_shard_ranges([own_shard_range])
root_ts = next(self.ts_iter)
shard_ranges[1].update_state(ShardRange.SHARDING,
state_timestamp=root_ts)
with mock_timestamp_now():
sharder, mock_swift = self.call_audit_container(
broker, shard_ranges)
self.assert_no_audit_messages(sharder, mock_swift)
self.assertFalse(broker.is_deleted())
self.assertEqual(['Updating own shard range from root, path: '
'.shards_a/shard_c, db: %s' % broker.db_file],
sharder.logger.get_lines_for_level('debug'))
expected = shard_ranges[1].copy()
self.assertEqual(
['Updated own shard range from %s to %s, path: .shards_a/shard_c, '
'db: %s' % (own_shard_range, expected, broker.db_file)],
sharder.logger.get_lines_for_level('info'))
# own shard range state is updated from root version
own_shard_range = broker.get_own_shard_range()
self.assertEqual(ShardRange.SHARDING, own_shard_range.state)
self.assertEqual(root_ts, own_shard_range.state_timestamp)
own_shard_range.update_state(ShardRange.SHARDED,
state_timestamp=next(self.ts_iter))
broker.merge_shard_ranges([own_shard_range])
sharder, mock_swift = self.call_audit_container(broker, shard_ranges)
self.assert_no_audit_messages(sharder, mock_swift)
own_shard_range.deleted = 1
own_shard_range.timestamp = next(self.ts_iter)
broker.merge_shard_ranges([own_shard_range])
# mocks for delete/reclaim time comparisons
with mock_timestamp_now(next(self.ts_iter)):
with mock.patch('swift.container.sharder.time.time',
lambda: float(next(self.ts_iter))):
sharder, mock_swift = self.call_audit_container(broker,
shard_ranges)
self.assert_no_audit_messages(sharder, mock_swift)
self.assertTrue(broker.is_deleted())
def test_audit_old_style_shard_container(self):
self._do_test_audit_shard_container('Root', 'a/c')
def test_audit_shard_container(self):
self._do_test_audit_shard_container('Quoted-Root', 'a/c')
def _do_test_audit_shard_container_merge_other_ranges(self, *args):
# verify that shard only merges other ranges from root when it is
# cleaving
shard_bounds = (
('a', 'p'), ('k', 't'), ('p', 'u'))
shard_states = (
ShardRange.ACTIVE, ShardRange.ACTIVE, ShardRange.FOUND,
)
shard_ranges = self._make_shard_ranges(shard_bounds, shard_states)
def check_audit(own_state, root_state):
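# Helper: create a shard broker whose own shard range is shard_ranges[1]
# in own_state at own_ts, audit it against root ranges in which that range
# has root_state at root_ts, then return the broker and ranges.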
shard_container = 'shard_c_%s' % root_ts.normal
broker = self._make_broker(account='.shards_a',
container=shard_container)
broker.set_sharding_sysmeta(*args)
shard_ranges[1].name = broker.path
# make shard's own shard range match shard_ranges[1]
own_sr = shard_ranges[1]
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0}
self.assertTrue(own_sr.update_state(own_state,
state_timestamp=own_ts))
own_sr.timestamp = own_ts
broker.merge_shard_ranges([shard_ranges[1]])
# bump state and timestamp of root shard_ranges[1] to be newer
self.assertTrue(shard_ranges[1].update_state(
root_state, state_timestamp=root_ts))
shard_ranges[1].timestamp = root_ts
sharder, mock_swift = self.call_audit_container(broker,
shard_ranges)
self._assert_stats(expected_stats, sharder, 'audit_shard')
debug_lines = sharder.logger.get_lines_for_level('debug')
self.assertGreater(len(debug_lines), 0)
self.assertEqual(
'Updating own shard range from root, path: .shards_a/%s, '
'db: %s' % (shard_container, broker.db_file),
sharder.logger.get_lines_for_level('debug')[0])
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
self.assertFalse(broker.is_deleted())
expected_headers = {'X-Backend-Record-Type': 'shard',
'X-Newest': 'true',
'X-Backend-Include-Deleted': 'True',
'X-Backend-Override-Deleted': 'true'}
params = {'format': 'json', 'marker': 'k', 'end_marker': 't',
'states': 'auditing'}
mock_swift.make_request.assert_called_once_with(
'GET', '/v1/a/c', expected_headers, acceptable_statuses=(2,),
params=params)
return broker, shard_ranges
# make root's copy of shard range newer than shard's local copy, so
# shard will always update its own shard range from root, and may merge
# other shard ranges
for own_state in ShardRange.STATES:
for root_state in ShardRange.STATES:
with annotate_failure('own_state=%s, root_state=%s' %
(own_state, root_state)):
own_ts = next(self.ts_iter)
root_ts = next(self.ts_iter)
broker, shard_ranges = check_audit(own_state, root_state)
# own shard range is updated from newer root version
own_shard_range = broker.get_own_shard_range()
self.assertEqual(root_state, own_shard_range.state)
self.assertEqual(root_ts, own_shard_range.state_timestamp)
updated_ranges = broker.get_shard_ranges(include_own=True)
if root_state in ShardRange.CLEAVING_STATES:
# check other shard ranges from root are merged
self.assertEqual(shard_ranges, updated_ranges)
else:
# check other shard ranges from root are not merged
self.assertEqual(shard_ranges[1:2], updated_ranges)
# make root's copy of shard range older than shard's local copy, so
# shard will never update its own shard range from root, but may merge
# other shard ranges
for own_state in ShardRange.STATES:
for root_state in ShardRange.STATES:
with annotate_failure('own_state=%s, root_state=%s' %
(own_state, root_state)):
root_ts = next(self.ts_iter)
own_ts = next(self.ts_iter)
broker, shard_ranges = check_audit(own_state, root_state)
# own shard range is not updated from older root version
own_shard_range = broker.get_own_shard_range()
self.assertEqual(own_state, own_shard_range.state)
self.assertEqual(own_ts, own_shard_range.state_timestamp)
updated_ranges = broker.get_shard_ranges(include_own=True)
if own_state in ShardRange.CLEAVING_STATES:
# check other shard ranges from root are merged
self.assertEqual(shard_ranges, updated_ranges)
else:
# check other shard ranges from root are not merged
self.assertEqual(shard_ranges[1:2], updated_ranges)
def test_audit_old_style_shard_container_merge_other_ranges(self):
self._do_test_audit_shard_container_merge_other_ranges('Root', 'a/c')
def test_audit_shard_container_merge_other_ranges(self):
self._do_test_audit_shard_container_merge_other_ranges('Quoted-Root',
'a/c')
def _assert_merge_into_shard(self, own_shard_range, shard_ranges,
root_shard_ranges, expected, *args, **kwargs):
# create a shard broker, initialise with shard_ranges, run audit on it
# supplying given root_shard_ranges and verify that the broker ends up
# with expected shard ranges.
broker = self._make_broker(account=own_shard_range.account,
container=own_shard_range.container)
broker.set_sharding_sysmeta(*args)
broker.merge_shard_ranges([own_shard_range] + shard_ranges)
db_state = kwargs.get('db_state', UNSHARDED)
if db_state == SHARDING:
broker.set_sharding_state()
if db_state == SHARDED:
broker.set_sharding_state()
broker.set_sharded_state()
self.assertEqual(db_state, broker.get_db_state())
self.assertFalse(broker.is_root_container())
sharder, mock_swift = self.call_audit_container(
broker, root_shard_ranges)
expected_headers = {'X-Backend-Record-Type': 'shard',
'X-Newest': 'true',
'X-Backend-Include-Deleted': 'True',
'X-Backend-Override-Deleted': 'true'}
params = {'format': 'json', 'marker': 'a', 'end_marker': 'b',
'states': 'auditing'}
mock_swift.make_request.assert_called_once_with(
'GET', '/v1/a/c', expected_headers, acceptable_statuses=(2,),
params=params)
self._assert_shard_ranges_equal(expected, broker.get_shard_ranges())
self.assertEqual(own_shard_range,
broker.get_own_shard_range(no_default=True))
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0}
self._assert_stats(expected_stats, sharder, 'audit_shard')
return sharder
def _do_test_audit_shard_root_ranges_not_merged(self, *args):
# Make root and other ranges that fully contain the shard namespace...
root_own_sr = ShardRange('a/c', next(self.ts_iter))
acceptor = ShardRange(
str(ShardName.create('.shards_a', 'c', 'c',
next(self.ts_iter), 1)),
next(self.ts_iter), 'a', 'c')
def do_test(own_state, acceptor_state, root_state):
acceptor_from_root = acceptor.copy(
timestamp=next(self.ts_iter), state=acceptor_state)
root_from_root = root_own_sr.copy(
timestamp=next(self.ts_iter), state=root_state)
with annotate_failure('with states %s %s %s'
% (own_state, acceptor_state, root_state)):
own_sr_name = ShardName.create(
'.shards_a', 'c', 'c', next(self.ts_iter), 0)
own_sr = ShardRange(
str(own_sr_name), next(self.ts_iter), state=own_state,
state_timestamp=next(self.ts_iter), lower='a', upper='b')
expected = existing = []
sharder = self._assert_merge_into_shard(
own_sr, existing,
[own_sr, acceptor_from_root, root_from_root],
expected, *args)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.STATES:
if own_state in ShardRange.CLEAVING_STATES:
# cleaving states are covered by other tests
continue
for acceptor_state in ShardRange.STATES:
for root_state in ShardRange.STATES:
do_test(own_state, acceptor_state, root_state)
def test_audit_old_style_shard_root_ranges_not_merged_not_cleaving(self):
# verify that other shard ranges from root are NOT merged into shard
# when it is NOT in a cleaving state
self._do_test_audit_shard_root_ranges_not_merged('Root', 'a/c')
def test_audit_shard_root_ranges_not_merged_not_cleaving(self):
# verify that other shard ranges from root are NOT merged into shard
# when it is NOT in a cleaving state
self._do_test_audit_shard_root_ranges_not_merged('Quoted-Root', 'a/c')
def test_audit_shard_root_ranges_with_own_merged_while_shrinking(self):
# Verify that a shrinking shard will merge other ranges, but not a
# non-ACTIVE root range.
# Make root and other ranges that fully contain the shard namespace...
root_own_sr = ShardRange('a/c', next(self.ts_iter))
acceptor = ShardRange(
str(ShardName.create('.shards_a', 'c', 'c',
next(self.ts_iter), 1)),
next(self.ts_iter), 'a', 'c')
def do_test(own_state, acceptor_state, root_state):
acceptor_from_root = acceptor.copy(
timestamp=next(self.ts_iter), state=acceptor_state)
root_from_root = root_own_sr.copy(
timestamp=next(self.ts_iter), state=root_state)
ts = next(self.ts_iter)
own_sr = ShardRange(
str(ShardName.create('.shards_a', 'c', 'c', ts, 0)),
ts, lower='a', upper='b', state=own_state, state_timestamp=ts)
expected = [acceptor_from_root]
with annotate_failure('with states %s %s %s'
% (own_state, acceptor_state, root_state)):
sharder = self._assert_merge_into_shard(
own_sr, [],
# own sr is in ranges fetched from root
[own_sr, acceptor_from_root, root_from_root],
expected, 'Quoted-Root', 'a/c')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.SHRINKING_STATES:
for acceptor_state in ShardRange.STATES:
if acceptor_state in ShardRange.CLEAVING_STATES:
# special case covered in other tests
continue
for root_state in ShardRange.STATES:
if root_state == ShardRange.ACTIVE:
# special case: ACTIVE root *is* merged
continue
with annotate_failure(
'with states %s %s %s'
% (own_state, acceptor_state, root_state)):
do_test(own_state, acceptor_state, root_state)
def test_audit_shard_root_ranges_missing_own_merged_while_shrinking(self):
# Verify that a shrinking shard will merge other ranges, but not a
# non-ACTIVE root range, even when the root does not have the shard's own
# range.
# Make root and other ranges that fully contain the shard namespace...
root_own_sr = ShardRange('a/c', next(self.ts_iter))
acceptor = ShardRange(
str(ShardName.create('.shards_a', 'c', 'c',
next(self.ts_iter), 1)),
next(self.ts_iter), 'a', 'c')
def do_test(own_state, acceptor_state, root_state):
acceptor_from_root = acceptor.copy(
timestamp=next(self.ts_iter), state=acceptor_state)
root_from_root = root_own_sr.copy(
timestamp=next(self.ts_iter), state=root_state)
ts = next(self.ts_iter)
own_sr = ShardRange(
str(ShardName.create('.shards_a', 'c', 'c', ts, 0)),
ts, lower='a', upper='b', state=own_state, state_timestamp=ts)
expected = [acceptor_from_root]
with annotate_failure('with states %s %s %s'
% (own_state, acceptor_state, root_state)):
sharder = self._assert_merge_into_shard(
own_sr, [],
# own sr is NOT in ranges fetched from root
[acceptor_from_root, root_from_root],
expected, 'Quoted-Root', 'a/c')
warning_lines = sharder.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_lines))
self.assertIn('root has no matching shard range',
warning_lines[0])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.SHRINKING_STATES:
for acceptor_state in ShardRange.STATES:
if acceptor_state in ShardRange.CLEAVING_STATES:
# special case covered in other tests
continue
for root_state in ShardRange.STATES:
if root_state == ShardRange.ACTIVE:
# special case: ACTIVE root *is* merged
continue
with annotate_failure(
'with states %s %s %s'
% (own_state, acceptor_state, root_state)):
do_test(own_state, acceptor_state, root_state)
def test_audit_shard_root_range_not_merged_while_shrinking(self):
# Verify that a shrinking shard will not merge a non-ACTIVE root range
def do_test(own_state, root_state):
root_own_sr = ShardRange('a/c', next(self.ts_iter),
state=ShardRange.SHARDED)
own_sr = ShardRange(
str(ShardName.create(
'.shards_a', 'c', 'c', next(self.ts_iter), 0)),
next(self.ts_iter), 'a', 'b', state=own_state)
expected = []
sharder = self._assert_merge_into_shard(
own_sr, [], [own_sr, root_own_sr],
expected, 'Quoted-Root', 'a/c')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.SHRINKING_STATES:
for root_state in ShardRange.STATES:
if root_state == ShardRange.ACTIVE:
continue # special case tested below
with annotate_failure((own_state, root_state)):
do_test(own_state, root_state)
def test_audit_shard_root_range_overlap_not_merged_while_shrinking(self):
# Verify that a shrinking shard will not merge an ACTIVE root range that
# overlaps with an existing sub-shard
def do_test(own_state):
root_own_sr = ShardRange('a/c', next(self.ts_iter),
state=ShardRange.ACTIVE)
own_sr = ShardRange(
str(ShardName.create(
'.shards_a', 'c', 'c', next(self.ts_iter), 0)),
next(self.ts_iter), 'a', 'b', state=own_state)
ts = next(self.ts_iter)
sub_shard = ShardRange(
str(ShardName.create(
'.shards_a', 'c', own_sr.container, ts, 0)),
ts, lower='a', upper='ab', state=ShardRange.ACTIVE)
expected = [sub_shard]
sharder = self._assert_merge_into_shard(
own_sr, [sub_shard], [own_sr, root_own_sr],
expected, 'Quoted-Root', 'a/c')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.SHRINKING_STATES:
with annotate_failure(own_state):
do_test(own_state)
def test_audit_shard_active_root_range_merged_while_shrinking(self):
# Verify that shrinking shard will merge an active root range
def do_test(own_state):
root_own_sr = ShardRange('a/c', next(self.ts_iter),
state=ShardRange.ACTIVE)
own_sr = ShardRange(
str(ShardName.create(
'.shards_a', 'c', 'c', next(self.ts_iter), 0)),
next(self.ts_iter), 'a', 'b', state=own_state)
expected = [root_own_sr]
sharder = self._assert_merge_into_shard(
own_sr, [], [own_sr, root_own_sr],
expected, 'Quoted-Root', 'a/c')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.SHRINKING_STATES:
with annotate_failure(own_state):
do_test(own_state)
def test_audit_shard_root_ranges_fetch_fails_while_shrinking(self):
# check audit copes with failed response while shard is shrinking
broker = self._make_shrinking_broker(lower='a', upper='b')
own_sr = broker.get_own_shard_range()
sharder, mock_swift = self.call_audit_container(
broker, [], exc=internal_client.UnexpectedResponse('bad', 'resp'))
self.assertEqual([], broker.get_shard_ranges())
self.assertEqual(own_sr, broker.get_own_shard_range(no_default=True))
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0}
self._assert_stats(expected_stats, sharder, 'audit_shard')
warning_lines = sharder.logger.get_lines_for_level('warning')
self.assertEqual(2, len(warning_lines))
self.assertIn('Failed to get shard ranges from a/c: bad',
warning_lines[0])
self.assertIn('unable to get shard ranges from root',
warning_lines[1])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
def test_audit_shard_root_ranges_merge_while_unsharded(self):
# Verify that unsharded shard with no existing shard ranges will merge
# other ranges, but not root range.
root_own_sr = ShardRange('a/c', next(self.ts_iter))
acceptor = ShardRange(
str(ShardRange.make_path(
'.shards_a', 'c', 'c', next(self.ts_iter), 1)),
next(self.ts_iter), 'a', 'c', state=ShardRange.ACTIVE)
def do_test(own_state, acceptor_state, root_state):
acceptor_from_root = acceptor.copy(
timestamp=next(self.ts_iter), state=acceptor_state)
own_sr = ShardRange(
str(ShardName.create(
'.shards_a', 'c', 'c', next(self.ts_iter), 0)),
next(self.ts_iter), 'a', 'b', state=own_state)
root_from_root = root_own_sr.copy(
timestamp=next(self.ts_iter), state=root_state)
expected = [acceptor_from_root]
sharder = self._assert_merge_into_shard(
own_sr, [],
[own_sr, acceptor_from_root, root_from_root],
expected, 'Quoted-Root', 'a/c')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.SHARDING_STATES:
for acceptor_state in ShardRange.STATES:
if acceptor_state in ShardRange.CLEAVING_STATES:
# special case covered in other tests
continue
for root_state in ShardRange.STATES:
with annotate_failure(
'with states %s %s %s'
% (own_state, acceptor_state, root_state)):
do_test(own_state, acceptor_state, root_state)
def test_audit_shard_root_ranges_merge_while_sharding(self):
# Verify that sharding shard with no existing shard ranges will merge
# other ranges, but not root range.
root_own_sr = ShardRange('a/c', next(self.ts_iter))
acceptor = ShardRange(
str(ShardRange.make_path(
'.shards_a', 'c', 'c', next(self.ts_iter), 1)),
next(self.ts_iter), 'a', 'c', state=ShardRange.ACTIVE)
def do_test(own_state, acceptor_state, root_state):
acceptor_from_root = acceptor.copy(
timestamp=next(self.ts_iter), state=acceptor_state)
ts = next(self.ts_iter)
own_sr = ShardRange(
str(ShardName.create(
'.shards_a', 'c', 'c', ts, 0)),
ts, 'a', 'b', epoch=ts, state=own_state)
root_from_root = root_own_sr.copy(
timestamp=next(self.ts_iter), state=root_state)
expected = [acceptor_from_root]
sharder = self._assert_merge_into_shard(
own_sr, [],
[own_sr, acceptor_from_root, root_from_root],
expected, 'Quoted-Root', 'a/c', db_state=SHARDING)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.SHARDING_STATES:
for acceptor_state in ShardRange.STATES:
if acceptor_state in ShardRange.CLEAVING_STATES:
# special case covered in other tests
continue
for root_state in ShardRange.STATES:
with annotate_failure(
'with states %s %s %s'
% (own_state, acceptor_state, root_state)):
do_test(own_state, acceptor_state, root_state)
def test_audit_shard_root_ranges_not_merged_once_sharded(self):
# Verify that sharded shard will not merge other ranges from root
root_own_sr = ShardRange('a/c', next(self.ts_iter))
# the acceptor complements the single existing sub-shard...
other_sub_shard = ShardRange(
str(ShardRange.make_path(
'.shards_a', 'c', 'c', next(self.ts_iter), 1)),
next(self.ts_iter), 'ab', 'c', state=ShardRange.ACTIVE)
def do_test(own_state, other_sub_shard_state, root_state):
sub_shard_from_root = other_sub_shard.copy(
timestamp=next(self.ts_iter), state=other_sub_shard_state)
ts = next(self.ts_iter)
own_sr = ShardRange(
str(ShardName.create(
'.shards_a', 'c', 'c', ts, 0)),
ts, 'a', 'b', epoch=ts, state=own_state)
ts = next(self.ts_iter)
sub_shard = ShardRange(
str(ShardName.create(
'.shards_a', 'c', own_sr.container, ts, 0)),
ts, lower='a', upper='ab', state=ShardRange.ACTIVE)
root_from_root = root_own_sr.copy(
timestamp=next(self.ts_iter), state=root_state)
expected = [sub_shard]
sharder = self._assert_merge_into_shard(
own_sr, [sub_shard],
[own_sr, sub_shard_from_root, root_from_root],
expected, 'Quoted-Root', 'a/c', db_state=SHARDED)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in (ShardRange.SHARDED, ShardRange.SHRUNK):
for other_sub_shard_state in ShardRange.STATES:
for root_state in ShardRange.STATES:
with annotate_failure(
'with states %s %s %s'
% (own_state, other_sub_shard_state, root_state)):
do_test(own_state, other_sub_shard_state, root_state)
def test_audit_shard_root_ranges_replace_existing_while_cleaving(self):
# Verify that sharding shard with stale existing sub-shard ranges will
# merge other ranges, but not root range.
root_own_sr = ShardRange('a/c', next(self.ts_iter),
state=ShardRange.SHARDED)
acceptor = ShardRange(
str(ShardRange.make_path(
'.shards_a', 'c', 'c', next(self.ts_iter), 1)),
next(self.ts_iter), 'a', 'c', state=ShardRange.ACTIVE)
ts = next(self.ts_iter)
acceptor_sub_shards = [ShardRange(
str(ShardRange.make_path(
'.shards_a', 'c', acceptor.container, ts, i)),
ts, lower, upper, state=ShardRange.ACTIVE)
for i, lower, upper in ((0, 'a', 'ab'), (1, 'ab', 'c'))]
# shard has incomplete existing shard ranges, ranges from root delete
# existing sub-shard and replace with other acceptor sub-shards
def do_test(own_state):
own_sr = ShardRange(
str(ShardName.create(
'.shards_a', 'c', 'c', next(self.ts_iter), 0)),
next(self.ts_iter), 'a', 'b', state=own_state)
ts = next(self.ts_iter)
sub_shard = ShardRange(
str(ShardName.create(
'.shards_a', 'c', own_sr.container, ts, 0)),
ts, lower='a', upper='ab', state=ShardRange.ACTIVE)
deleted_sub_shard = sub_shard.copy(
timestamp=next(self.ts_iter), state=ShardRange.SHARDED,
deleted=1)
expected = acceptor_sub_shards
sharder = self._assert_merge_into_shard(
own_sr, [sub_shard],
[root_own_sr, own_sr, deleted_sub_shard] + acceptor_sub_shards,
expected, 'Quoted-Root', 'a/c')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.CLEAVING_STATES:
with annotate_failure(own_state):
do_test(own_state)
def test_audit_shard_root_ranges_supplement_deleted_while_cleaving(self):
# Verify that sharding shard with deleted existing sub-shard ranges
# will merge other ranges while sharding, but not root range.
root_own_sr = ShardRange('a/c', next(self.ts_iter),
state=ShardRange.SHARDED)
acceptor = ShardRange(
str(ShardRange.make_path(
'.shards_a', 'c', 'c', next(self.ts_iter), 1)),
next(self.ts_iter), 'a', 'c', state=ShardRange.ACTIVE)
ts = next(self.ts_iter)
acceptor_sub_shards = [ShardRange(
str(ShardRange.make_path(
'.shards_a', 'c', acceptor.container, ts, i)),
ts, lower, upper, state=ShardRange.ACTIVE)
for i, lower, upper in ((0, 'a', 'ab'), (1, 'ab', 'c'))]
# shard already has deleted existing shard ranges
expected = acceptor_sub_shards
def do_test(own_state):
own_sr = ShardRange(
str(ShardName.create(
'.shards_a', 'c', 'c', next(self.ts_iter), 0)),
next(self.ts_iter), 'a', 'b', state=own_state)
ts = next(self.ts_iter)
deleted_sub_shards = [ShardRange(
str(ShardName.create(
'.shards_a', 'c', own_sr.container, ts, i)),
ts, lower, upper, state=ShardRange.SHARDED, deleted=1)
for i, lower, upper in ((0, 'a', 'ab'), (1, 'ab', 'b'))]
sharder = self._assert_merge_into_shard(
own_sr, deleted_sub_shards,
[own_sr, root_own_sr] + acceptor_sub_shards,
expected, 'Quoted-Root', 'a/c')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.CLEAVING_STATES:
with annotate_failure(own_state):
do_test(own_state)
def test_audit_shard_root_ranges_supplement_existing_while_cleaving(self):
# Verify that sharding shard with incomplete existing sub-shard ranges
# will merge other ranges that fill the gap, but not root range.
root_own_sr = ShardRange('a/c', next(self.ts_iter),
state=ShardRange.SHARDED)
acceptor = ShardRange(
str(ShardRange.make_path(
'.shards_a', 'c', 'c', next(self.ts_iter), 1)),
next(self.ts_iter), 'a', 'c', state=ShardRange.ACTIVE)
ts = next(self.ts_iter)
acceptor_sub_shards = [ShardRange(
str(ShardRange.make_path(
'.shards_a', 'c', acceptor.container, ts, i)),
ts, lower, upper, state=ShardRange.ACTIVE)
for i, lower, upper in ((0, 'a', 'ab'), (1, 'ab', 'c'))]
# shard has incomplete existing shard ranges and range from root fills
# the gap
def do_test(own_state):
own_sr = ShardRange(
str(ShardName.create(
'.shards_a', 'c', 'c', next(self.ts_iter), 0)),
next(self.ts_iter), 'a', 'b', state=own_state)
ts = next(self.ts_iter)
sub_shard = ShardRange(
str(ShardName.create(
'.shards_a', 'c', own_sr.container, ts, 0)),
ts, lower='a', upper='ab', state=ShardRange.ACTIVE)
expected = [sub_shard] + acceptor_sub_shards[1:]
sharder = self._assert_merge_into_shard(
own_sr, [sub_shard],
[own_sr, root_own_sr] + acceptor_sub_shards[1:],
expected, 'Quoted-Root', 'a/c')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.CLEAVING_STATES:
with annotate_failure(own_state):
do_test(own_state)
def test_audit_shard_root_ranges_cleaving_not_merged_while_cleaving(self):
# Verify that sharding shard will not merge other ranges that are in a
# cleaving state.
root_own_sr = ShardRange('a/c', next(self.ts_iter),
state=ShardRange.SHARDED)
acceptor = ShardRange(
str(ShardRange.make_path(
'.shards_a', 'c', 'c', next(self.ts_iter), 1)),
next(self.ts_iter), 'a', 'c', state=ShardRange.ACTIVE)
def do_test(own_state, acceptor_state, root_state):
own_sr = ShardRange(
str(ShardName.create(
'.shards_a', 'c', 'c', next(self.ts_iter), 0)),
next(self.ts_iter), 'a', 'b', state=own_state)
root_from_root = root_own_sr.copy(
timestamp=next(self.ts_iter), state=root_state)
acceptor_from_root = acceptor.copy(
timestamp=next(self.ts_iter), state=acceptor_state)
if (own_state in ShardRange.SHRINKING_STATES and
root_state == ShardRange.ACTIVE):
# special case: when shrinking, ACTIVE root shard *is* merged
expected = [root_from_root]
else:
expected = []
sharder = self._assert_merge_into_shard(
own_sr, [],
[own_sr, acceptor_from_root, root_from_root],
expected, 'Quoted-Root', 'a/c')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
# ranges from root that are in a cleaving state are not merged...
for own_state in ShardRange.CLEAVING_STATES:
for acceptor_state in ShardRange.CLEAVING_STATES:
for root_state in ShardRange.STATES:
with annotate_failure(
'with states %s %s %s'
% (own_state, acceptor_state, root_state)):
do_test(own_state, acceptor_state, root_state)
def test_audit_shard_root_ranges_overlap_not_merged_while_cleaving_1(self):
# Verify that sharding/shrinking shard will not merge other ranges that
# would create an overlap; shard has complete existing shard ranges,
# newer range from root ignored
root_own_sr = ShardRange('a/c', next(self.ts_iter),
state=ShardRange.SHARDED)
acceptor = ShardRange(
str(ShardRange.make_path(
'.shards_a', 'c', 'c', next(self.ts_iter), 1)),
next(self.ts_iter), 'a', 'c', state=ShardRange.ACTIVE)
def do_test(own_state):
own_sr = ShardRange(
str(ShardName.create(
'.shards_a', 'c', 'c', next(self.ts_iter), 0)),
next(self.ts_iter), 'a', 'b', state=own_state)
ts = next(self.ts_iter)
sub_shards = [ShardRange(
str(ShardName.create(
'.shards_a', 'c', own_sr.container, ts, i)),
ts, lower, upper, state=ShardRange.ACTIVE)
for i, lower, upper in ((0, 'a', 'ab'), (1, 'ab', 'b'))]
acceptor_from_root = acceptor.copy(timestamp=next(self.ts_iter))
expected = sub_shards
sharder = self._assert_merge_into_shard(
own_sr, sub_shards,
[own_sr, acceptor_from_root, root_own_sr],
expected, 'Quoted-Root', 'a/c')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.CLEAVING_STATES:
with annotate_failure(own_state):
do_test(own_state)
def test_audit_shard_root_ranges_overlap_not_merged_while_cleaving_2(self):
# Verify that sharding/shrinking shard will not merge other ranges that
# would create an overlap; shard has incomplete existing shard ranges
# but ranges from root overlaps
root_own_sr = ShardRange('a/c', next(self.ts_iter),
state=ShardRange.SHARDED)
acceptor = ShardRange(
str(ShardRange.make_path(
'.shards_a', 'c', 'c', next(self.ts_iter), 1)),
next(self.ts_iter), 'a', 'c', state=ShardRange.ACTIVE)
ts = next(self.ts_iter)
acceptor_sub_shards = [ShardRange(
str(ShardRange.make_path(
'.shards_a', 'c', acceptor.container, ts, i)),
ts, lower, upper, state=ShardRange.ACTIVE)
for i, lower, upper in ((0, 'a', 'ab'), (1, 'ab', 'c'))]
def do_test(own_state):
own_sr = ShardRange(
str(ShardName.create(
'.shards_a', 'c', 'c', next(self.ts_iter), 0)),
next(self.ts_iter), 'a', 'b', state=own_state)
ts = next(self.ts_iter)
sub_shard = ShardRange(
str(ShardName.create(
'.shards_a', 'c', own_sr.container, ts, 0)),
ts, lower='a', upper='abc', state=ShardRange.ACTIVE)
expected = [sub_shard]
sharder = self._assert_merge_into_shard(
own_sr, [sub_shard],
acceptor_sub_shards[1:] + [own_sr, root_own_sr],
expected, 'Quoted-Root', 'a/c')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.CLEAVING_STATES:
with annotate_failure(own_state):
do_test(own_state)
def test_audit_shard_root_ranges_with_gap_not_merged_while_cleaving(self):
# Verify that sharding/shrinking shard will not merge other ranges that
# would leave a gap.
root_own_sr = ShardRange('a/c', next(self.ts_iter),
state=ShardRange.SHARDED)
acceptor = ShardRange(
str(ShardRange.make_path(
'.shards_a', 'c', 'c', next(self.ts_iter), 1)),
next(self.ts_iter), 'a', 'c', state=ShardRange.ACTIVE)
ts = next(self.ts_iter)
acceptor_sub_shards = [ShardRange(
str(ShardRange.make_path(
'.shards_a', 'c', acceptor.container, ts, i)),
ts, lower, upper, state=ShardRange.ACTIVE)
for i, lower, upper in ((0, 'a', 'ab'), (1, 'ab', 'c'))]
def do_test(own_state):
own_sr = ShardRange(
str(ShardName.create(
'.shards_a', 'c', 'c', next(self.ts_iter), 0)),
next(self.ts_iter), 'a', 'b', state=own_state)
# root ranges have gaps w.r.t. the shard namespace
existing = expected = []
sharder = self._assert_merge_into_shard(
own_sr, existing,
acceptor_sub_shards[:1] + [own_sr, root_own_sr],
expected, 'Quoted-Root', 'a/c')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.CLEAVING_STATES:
with annotate_failure(own_state):
do_test(own_state)
def test_audit_shard_container_ancestors_not_merged_while_sharding(self):
# Verify that sharding shard will not merge parent and root shard
# ranges even when the sharding shard has no other ranges
root_sr = ShardRange('a/root', next(self.ts_iter),
state=ShardRange.SHARDED)
grandparent_path = ShardRange.make_path(
'.shards_a', 'root', root_sr.container, next(self.ts_iter), 2)
grandparent_sr = ShardRange(grandparent_path, next(self.ts_iter),
'', 'd', state=ShardRange.ACTIVE)
self.assertTrue(grandparent_sr.is_child_of(root_sr))
parent_path = ShardRange.make_path(
'.shards_a', 'root', grandparent_sr.container, next(self.ts_iter),
2)
parent_sr = ShardRange(parent_path, next(self.ts_iter), '', 'd',
state=ShardRange.ACTIVE)
self.assertTrue(parent_sr.is_child_of(grandparent_sr))
child_path = ShardRange.make_path(
'.shards_a', 'root', parent_sr.container, next(self.ts_iter), 2)
child_own_sr = ShardRange(child_path, next(self.ts_iter), 'a', 'b',
state=ShardRange.SHARDING)
self.assertTrue(child_own_sr.is_child_of(parent_sr))
ranges_from_root = [grandparent_sr, parent_sr, root_sr, child_own_sr]
expected = []
sharder = self._assert_merge_into_shard(
child_own_sr, [], ranges_from_root, expected, 'Quoted-Root', 'a/c')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
def test_audit_shard_container_children_merged_while_sharding(self):
# Verify that sharding shard will always merge children shard ranges
def do_test(child_deleted, child_state):
root_sr = ShardRange('a/root', next(self.ts_iter),
state=ShardRange.SHARDED)
parent_path = ShardRange.make_path(
'.shards_a', 'root', root_sr.container,
next(self.ts_iter), 2)
parent_sr = ShardRange(
parent_path, next(self.ts_iter), 'a', 'd',
state=ShardRange.SHARDING)
child_srs = []
for i, lower, upper in ((0, 'a', 'b'), (0, 'b', 'd')):
child_path = ShardRange.make_path(
'.shards_a', 'root', parent_sr.container,
next(self.ts_iter), i)
child_sr = ShardRange(
child_path, next(self.ts_iter), lower, upper,
state=child_state, deleted=child_deleted)
self.assertTrue(child_sr.is_child_of(parent_sr))
child_srs.append(child_sr)
other_path = ShardRange.make_path(
'.shards_a', 'root', root_sr.container,
next(self.ts_iter), 3) # different index w.r.t. parent
other_sr = ShardRange(
other_path, next(self.ts_iter), 'a', 'd',
state=ShardRange.ACTIVE)
self.assertFalse(other_sr.is_child_of(parent_sr))
# the parent is sharding...
broker = self._make_broker(account=parent_sr.account,
container=parent_sr.container)
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
broker.merge_shard_ranges(parent_sr)
self.assertEqual(UNSHARDED, broker.get_db_state())
self.assertEqual([parent_sr],
broker.get_shard_ranges(include_own=True))
ranges_from_root = child_srs + [parent_sr, root_sr, other_sr]
sharder, mock_swift = self.call_audit_container(
broker, ranges_from_root)
expected_headers = {'X-Backend-Record-Type': 'shard',
'X-Newest': 'true',
'X-Backend-Include-Deleted': 'True',
'X-Backend-Override-Deleted': 'true'}
params = {'format': 'json', 'marker': 'a', 'end_marker': 'd',
'states': 'auditing'}
mock_swift.make_request.assert_called_once_with(
'GET', '/v1/a/c', expected_headers,
acceptable_statuses=(2,), params=params)
expected = child_srs + [parent_sr]
if child_deleted:
expected.append(other_sr)
self._assert_shard_ranges_equal(
sorted(expected, key=ShardRange.sort_key),
sorted(broker.get_shard_ranges(
include_own=True, include_deleted=True),
key=ShardRange.sort_key))
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0}
self._assert_stats(expected_stats, sharder, 'audit_shard')
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for child_deleted in (False, True):
for child_state in ShardRange.STATES:
with annotate_failure('deleted: %s, state: %s'
% (child_deleted, child_state)):
do_test(child_deleted, child_state)
def test_audit_shard_container_children_not_merged_once_sharded(self):
# Verify that sharding shard will not merge children shard ranges
# once the DB is sharded (but continues to merge own shard range
# received from root)
root_sr = ShardRange('a/root', next(self.ts_iter),
state=ShardRange.SHARDED)
ts = next(self.ts_iter)
parent_path = ShardRange.make_path(
'.shards_a', 'root', root_sr.container, ts, 2)
parent_sr = ShardRange(
parent_path, ts, 'a', 'b', state=ShardRange.ACTIVE, epoch=ts)
child_srs = []
for i, lower, upper in ((0, 'a', 'ab'), (0, 'ab', 'b')):
child_path = ShardRange.make_path(
'.shards_a', 'root', parent_sr.container,
next(self.ts_iter), i)
child_sr = ShardRange(
child_path, next(self.ts_iter), lower, upper,
state=ShardRange.CLEAVED)
self.assertTrue(child_sr.is_child_of(parent_sr))
child_srs.append(child_sr)
# DB is unsharded...
broker = self._make_broker(account=parent_sr.account,
container=parent_sr.container)
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
broker.merge_shard_ranges(parent_sr)
self.assertEqual(UNSHARDED, broker.get_db_state())
self.assertTrue(parent_sr.update_state(
ShardRange.SHARDING, state_timestamp=next(self.ts_iter)))
ranges_from_root = child_srs + [parent_sr, root_sr]
sharder, _ = self.call_audit_container(broker, ranges_from_root)
# children ranges from root are merged
self._assert_shard_ranges_equal(child_srs, broker.get_shard_ranges())
# own sr from root is merged
self.assertEqual(dict(parent_sr), dict(broker.get_own_shard_range()))
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
# DB is sharding...
self.assertTrue(broker.set_sharding_state())
self.assertEqual(SHARDING, broker.get_db_state())
parent_sr.state_timestamp = next(self.ts_iter)
for child_sr in child_srs:
child_sr.update_state(ShardRange.ACTIVE,
state_timestamp=next(self.ts_iter))
sharder, _ = self.call_audit_container(broker, ranges_from_root)
# children ranges from root are merged
self._assert_shard_ranges_equal(child_srs, broker.get_shard_ranges())
# own sr from root is merged
self.assertEqual(dict(parent_sr), dict(broker.get_own_shard_range()))
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
# DB is sharded...
self.assertTrue(broker.set_sharded_state())
self.assertEqual(SHARDED, broker.get_db_state())
self.assertTrue(parent_sr.update_state(
ShardRange.SHARDED, state_timestamp=next(self.ts_iter)))
updated_child_srs = [
child_sr.copy(state=ShardRange.SHARDING,
state_timestamp=next(self.ts_iter))
for child_sr in child_srs]
ranges_from_root = updated_child_srs + [parent_sr, root_sr]
sharder, _ = self.call_audit_container(broker, ranges_from_root)
# children ranges from root are NOT merged
self._assert_shard_ranges_equal(child_srs, broker.get_shard_ranges())
# own sr from root is merged
self.assertEqual(dict(parent_sr), dict(broker.get_own_shard_range()))
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
def test_audit_shard_deleted_range_in_root_container(self):
# verify that shard DB is marked deleted when its own shard range is
# updated with deleted version from root
broker = self._make_broker(account='.shards_a', container='shard_c')
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
with mock_timestamp_now(next(self.ts_iter)):
own_shard_range = broker.get_own_shard_range()
own_shard_range.lower = 'k'
own_shard_range.upper = 't'
broker.merge_shard_ranges([own_shard_range])
self.assertFalse(broker.is_deleted())
self.assertFalse(broker.is_root_container())
shard_bounds = (
('a', 'j'), ('k', 't'), ('k', 's'), ('l', 's'), ('s', 'z'))
shard_ranges = self._make_shard_ranges(shard_bounds, ShardRange.ACTIVE,
timestamp=next(self.ts_iter))
shard_ranges[1].name = broker.path
shard_ranges[1].update_state(ShardRange.SHARDED,
state_timestamp=next(self.ts_iter))
shard_ranges[1].deleted = 1
# mocks for delete/reclaim time comparisons
with mock_timestamp_now(next(self.ts_iter)):
with mock.patch('swift.container.sharder.time.time',
lambda: float(next(self.ts_iter))):
sharder, mock_swift = self.call_audit_container(broker,
shard_ranges)
self.assert_no_audit_messages(sharder, mock_swift)
self.assertTrue(broker.is_deleted())
def test_audit_shard_deleted_range_missing_from_root_container(self):
# verify that shard DB is marked deleted when its own shard range is
# marked deleted, despite receiving nothing from root
broker = self._make_broker(account='.shards_a', container='shard_c')
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
own_shard_range = broker.get_own_shard_range()
own_shard_range.lower = 'k'
own_shard_range.upper = 't'
own_shard_range.update_state(ShardRange.SHARDED,
state_timestamp=Timestamp.now())
own_shard_range.deleted = 1
broker.merge_shard_ranges([own_shard_range])
self.assertFalse(broker.is_deleted())
self.assertFalse(broker.is_root_container())
sharder, mock_swift = self.call_audit_container(broker, [])
self.assert_no_audit_messages(sharder, mock_swift)
self.assertTrue(broker.is_deleted())
def test_find_and_enable_sharding_candidates(self):
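# Verify that ACTIVE shard ranges whose object count reaches the
# shard_container_threshold are moved to SHARDING (with an epoch set)
# and that repeat calls are idempotent.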
broker = self._make_broker()
broker.enable_sharding(next(self.ts_iter))
shard_bounds = (('', 'here'), ('here', 'there'), ('there', ''))
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.CLEAVED)
shard_ranges[0].state = ShardRange.ACTIVE
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.set_sharding_state())
self.assertTrue(broker.set_sharded_state())
with self._mock_sharder() as sharder:
sharder._find_and_enable_sharding_candidates(broker)
# one range just below threshold
shard_ranges[0].update_meta(sharder.shard_container_threshold - 1, 0)
broker.merge_shard_ranges(shard_ranges[0])
with self._mock_sharder() as sharder:
sharder._find_and_enable_sharding_candidates(broker)
self._assert_shard_ranges_equal(shard_ranges,
broker.get_shard_ranges())
# two ranges above threshold, only one ACTIVE
shard_ranges[0].update_meta(sharder.shard_container_threshold, 0)
shard_ranges[2].update_meta(sharder.shard_container_threshold + 1, 0)
broker.merge_shard_ranges([shard_ranges[0], shard_ranges[2]])
with self._mock_sharder() as sharder:
with mock_timestamp_now() as now:
sharder._find_and_enable_sharding_candidates(broker)
expected = shard_ranges[0].copy(state=ShardRange.SHARDING,
state_timestamp=now, epoch=now)
self._assert_shard_ranges_equal([expected] + shard_ranges[1:],
broker.get_shard_ranges())
# check idempotency
with self._mock_sharder() as sharder:
with mock_timestamp_now() as now:
sharder._find_and_enable_sharding_candidates(broker)
self._assert_shard_ranges_equal([expected] + shard_ranges[1:],
broker.get_shard_ranges())
# two ranges above threshold, both ACTIVE
shard_ranges[2].update_state(ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges[2])
with self._mock_sharder() as sharder:
with mock_timestamp_now() as now:
sharder._find_and_enable_sharding_candidates(broker)
expected_2 = shard_ranges[2].copy(state=ShardRange.SHARDING,
state_timestamp=now, epoch=now)
self._assert_shard_ranges_equal(
[expected, shard_ranges[1], expected_2], broker.get_shard_ranges())
# check idempotency
with self._mock_sharder() as sharder:
with mock_timestamp_now() as now:
sharder._find_and_enable_sharding_candidates(broker)
self._assert_shard_ranges_equal(
[expected, shard_ranges[1], expected_2], broker.get_shard_ranges())
def test_find_and_enable_sharding_candidates_bootstrap(self):
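# Verify that an unsharded root container is enabled for sharding once
# its own object count reaches the shard_container_threshold.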
broker = self._make_broker()
with self._mock_sharder(
conf={'shard_container_threshold': 2}) as sharder:
sharder._find_and_enable_sharding_candidates(broker)
self.assertEqual(ShardRange.ACTIVE, broker.get_own_shard_range().state)
broker.put_object('obj1', next(self.ts_iter).internal, 1, '', '')
broker.put_object('obj2', next(self.ts_iter).internal, 1, '', '')
self.assertEqual(2, broker.get_info()['object_count'])
with self._mock_sharder(
conf={'shard_container_threshold': 2}) as sharder:
with mock_timestamp_now() as now:
own_sr = update_own_shard_range_stats(
broker, broker.get_own_shard_range())
sharder._find_and_enable_sharding_candidates(
broker, [own_sr])
own_sr = broker.get_own_shard_range()
self.assertEqual(ShardRange.SHARDING, own_sr.state)
self.assertEqual(now, own_sr.state_timestamp)
self.assertEqual(now, own_sr.epoch)
# check idempotency
with self._mock_sharder(
conf={'shard_container_threshold': 2}) as sharder:
with mock_timestamp_now():
own_sr = update_own_shard_range_stats(
broker, broker.get_own_shard_range())
sharder._find_and_enable_sharding_candidates(
broker, [own_sr])
own_sr = broker.get_own_shard_range()
self.assertEqual(ShardRange.SHARDING, own_sr.state)
self.assertEqual(now, own_sr.state_timestamp)
self.assertEqual(now, own_sr.epoch)
def test_find_and_enable_shrinking_candidates(self):
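# Verify that a shard range below the shrink threshold is set to
# SHRINKING, its acceptor is expanded, and the updated ranges are sent
# to the acceptor and donor containers.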
broker = self._make_broker()
broker.enable_sharding(next(self.ts_iter))
shard_bounds = (('', 'here'), ('here', 'there'), ('there', ''))
size = (DEFAULT_SHARDER_CONF['shrink_threshold'])
# all shard ranges too big to shrink
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.ACTIVE, object_count=size - 1,
tombstones=1)
own_sr = broker.get_own_shard_range()
own_sr.update_state(ShardRange.SHARDED, Timestamp.now())
broker.merge_shard_ranges(shard_ranges + [own_sr])
self.assertTrue(broker.set_sharding_state())
self.assertTrue(broker.set_sharded_state())
with self._mock_sharder() as sharder:
sharder._find_and_enable_shrinking_candidates(broker)
self._assert_shard_ranges_equal(shard_ranges,
broker.get_shard_ranges())
# one range just below threshold
shard_ranges[0].update_meta(size - 2, 0)
broker.merge_shard_ranges(shard_ranges[0])
with self._mock_sharder() as sharder:
with mock_timestamp_now() as now:
sharder._send_shard_ranges = mock.MagicMock()
sharder._find_and_enable_shrinking_candidates(broker)
acceptor = shard_ranges[1].copy(lower=shard_ranges[0].lower)
acceptor.timestamp = now
donor = shard_ranges[0].copy(state=ShardRange.SHRINKING,
state_timestamp=now, epoch=now)
self._assert_shard_ranges_equal([donor, acceptor, shard_ranges[2]],
broker.get_shard_ranges())
sharder._send_shard_ranges.assert_has_calls(
[mock.call(broker, acceptor.account, acceptor.container,
[acceptor]),
mock.call(broker, donor.account, donor.container,
[donor, acceptor])]
)
# check idempotency
with self._mock_sharder() as sharder:
with mock_timestamp_now() as now:
sharder._send_shard_ranges = mock.MagicMock()
sharder._find_and_enable_shrinking_candidates(broker)
self._assert_shard_ranges_equal([donor, acceptor, shard_ranges[2]],
broker.get_shard_ranges())
sharder._send_shard_ranges.assert_has_calls(
[mock.call(broker, acceptor.account, acceptor.container,
[acceptor]),
mock.call(broker, donor.account, donor.container,
[donor, acceptor])]
)
# acceptor falls below threshold - not a candidate
with self._mock_sharder() as sharder:
with mock_timestamp_now() as now:
acceptor.update_meta(0, 0, meta_timestamp=now)
broker.merge_shard_ranges(acceptor)
sharder._send_shard_ranges = mock.MagicMock()
sharder._find_and_enable_shrinking_candidates(broker)
self._assert_shard_ranges_equal([donor, acceptor, shard_ranges[2]],
broker.get_shard_ranges())
sharder._send_shard_ranges.assert_has_calls(
[mock.call(broker, acceptor.account, acceptor.container,
[acceptor]),
mock.call(broker, donor.account, donor.container,
[donor, acceptor])]
)
# ...until donor has shrunk
with self._mock_sharder() as sharder:
with mock_timestamp_now() as now:
donor.update_state(ShardRange.SHARDED, state_timestamp=now)
donor.set_deleted(timestamp=now)
broker.merge_shard_ranges(donor)
sharder._send_shard_ranges = mock.MagicMock()
sharder._find_and_enable_shrinking_candidates(broker)
new_acceptor = shard_ranges[2].copy(lower=acceptor.lower)
new_acceptor.timestamp = now
new_donor = acceptor.copy(state=ShardRange.SHRINKING,
state_timestamp=now, epoch=now)
self._assert_shard_ranges_equal(
[donor, new_donor, new_acceptor],
broker.get_shard_ranges(include_deleted=True))
sharder._send_shard_ranges.assert_has_calls(
[mock.call(broker, new_acceptor.account, new_acceptor.container,
[new_acceptor]),
mock.call(broker, new_donor.account, new_donor.container,
[new_donor, new_acceptor])]
)
# ...finally the last shard shrinks to the root
with self._mock_sharder() as sharder:
with mock_timestamp_now() as now:
new_donor.update_state(ShardRange.SHARDED, state_timestamp=now)
new_donor.set_deleted(timestamp=now)
new_acceptor.update_meta(0, 0, meta_timestamp=now)
broker.merge_shard_ranges([new_donor, new_acceptor])
sharder._send_shard_ranges = mock.MagicMock()
sharder._find_and_enable_shrinking_candidates(broker)
final_donor = new_acceptor.copy(state=ShardRange.SHRINKING,
state_timestamp=now, epoch=now)
self._assert_shard_ranges_equal(
[donor, new_donor, final_donor],
broker.get_shard_ranges(include_deleted=True))
sharder._send_shard_ranges.assert_has_calls(
[mock.call(broker, final_donor.account, final_donor.container,
[final_donor, broker.get_own_shard_range()])]
)
def test_find_and_enable_multiple_shrinking_candidates(self):
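# Verify that multiple donor/acceptor pairs can be enabled in a single
# pass, with at most one donor shrinking into each acceptor.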
broker = self._make_broker()
broker.enable_sharding(next(self.ts_iter))
shard_bounds = (('', 'a'), ('a', 'b'), ('b', 'c'),
('c', 'd'), ('d', 'e'), ('e', ''))
size = (DEFAULT_SHARDER_CONF['shrink_threshold'])
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.ACTIVE, object_count=size)
own_sr = broker.get_own_shard_range()
own_sr.update_state(ShardRange.SHARDED, Timestamp.now())
broker.merge_shard_ranges(shard_ranges + [own_sr])
self.assertTrue(broker.set_sharding_state())
self.assertTrue(broker.set_sharded_state())
with self._mock_sharder() as sharder:
sharder._find_and_enable_shrinking_candidates(broker)
self._assert_shard_ranges_equal(shard_ranges,
broker.get_shard_ranges())
# three ranges just below threshold
shard_ranges = broker.get_shard_ranges() # get timestamps updated
shard_ranges[0].update_meta(size - 1, 0)
shard_ranges[1].update_meta(size - 1, 0)
shard_ranges[3].update_meta(size - 1, 0)
broker.merge_shard_ranges(shard_ranges)
with self._mock_sharder() as sharder:
with mock_timestamp_now() as now:
sharder._send_shard_ranges = mock.MagicMock()
sharder._find_and_enable_shrinking_candidates(broker)
# 0 shrinks into 1 (only one donor per acceptor is allowed)
shard_ranges[0].update_state(ShardRange.SHRINKING, state_timestamp=now)
shard_ranges[0].epoch = now
shard_ranges[1].lower = shard_ranges[0].lower
shard_ranges[1].timestamp = now
# 3 shrinks into 4
shard_ranges[3].update_state(ShardRange.SHRINKING, state_timestamp=now)
shard_ranges[3].epoch = now
shard_ranges[4].lower = shard_ranges[3].lower
shard_ranges[4].timestamp = now
self._assert_shard_ranges_equal(shard_ranges,
broker.get_shard_ranges())
for donor, acceptor in (shard_ranges[:2], shard_ranges[3:5]):
sharder._send_shard_ranges.assert_has_calls(
[mock.call(broker, acceptor.account, acceptor.container,
[acceptor]),
mock.call(broker, donor.account, donor.container,
[donor, acceptor])]
)
def test_partition_and_device_filters(self):
# verify partitions and devices kwargs result in filtering of processed
# containers but not of the local device ids.
ring = FakeRing()
dev_ids = set()
container_data = []
for dev in ring.devs:
dev_ids.add(dev['id'])
part = str(dev['id'])
broker = self._make_broker(
container='c%s' % dev['id'], hash_='c%shash' % dev['id'],
device=dev['device'], part=part)
broker.update_metadata({'X-Container-Sysmeta-Sharding':
('true', next(self.ts_iter).internal)})
container_data.append((broker.path, dev['id'], part))
with self._mock_sharder() as sharder:
sharder.ring = ring
sharder._check_node = lambda node: os.path.join(
sharder.conf['devices'], node['device'])
with mock.patch.object(
sharder, '_process_broker') as mock_process_broker:
sharder.run_once()
self.assertEqual(dev_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(set(container_data),
set((call[0][0].path, call[0][1]['id'], call[0][2])
for call in mock_process_broker.call_args_list))
with self._mock_sharder() as sharder:
sharder.ring = ring
sharder._check_node = lambda node: os.path.join(
sharder.conf['devices'], node['device'])
with mock.patch.object(
sharder, '_process_broker') as mock_process_broker:
sharder.run_once(partitions='0')
self.assertEqual(dev_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(set([container_data[0]]),
set((call[0][0].path, call[0][1]['id'], call[0][2])
for call in mock_process_broker.call_args_list))
with self._mock_sharder() as sharder:
sharder.ring = ring
sharder._check_node = lambda node: os.path.join(
sharder.conf['devices'], node['device'])
with mock.patch.object(
sharder, '_process_broker') as mock_process_broker:
sharder.run_once(partitions='2,0')
self.assertEqual(dev_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(set([container_data[0], container_data[2]]),
set((call[0][0].path, call[0][1]['id'], call[0][2])
for call in mock_process_broker.call_args_list))
with self._mock_sharder() as sharder:
sharder.ring = ring
sharder._check_node = lambda node: os.path.join(
sharder.conf['devices'], node['device'])
with mock.patch.object(
sharder, '_process_broker') as mock_process_broker:
sharder.run_once(partitions='2,0', devices='sdc')
self.assertEqual(dev_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(set([container_data[2]]),
set((call[0][0].path, call[0][1]['id'], call[0][2])
for call in mock_process_broker.call_args_list))
with self._mock_sharder() as sharder:
sharder.ring = ring
sharder._check_node = lambda node: os.path.join(
sharder.conf['devices'], node['device'])
with mock.patch.object(
sharder, '_process_broker') as mock_process_broker:
sharder.run_once(devices='sdb,sdc')
self.assertEqual(dev_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(set(container_data[1:]),
set((call[0][0].path, call[0][1]['id'], call[0][2])
for call in mock_process_broker.call_args_list))
def test_audit_cleave_contexts(self):
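# Verify that stale cleaving contexts are removed: incomplete contexts
# once reclaim_age has passed, completed contexts once
# recon_sharded_timeout has passed.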
def add_cleave_context(id, last_modified, cleaving_done):
params = {'ref': id,
'cursor': 'curs',
'max_row': 2,
'cleave_to_row': 2,
'last_cleave_to_row': 1,
'cleaving_done': cleaving_done,
'misplaced_done': True,
'ranges_done': 2,
'ranges_todo': 4}
key = 'X-Container-Sysmeta-Shard-Context-%s' % id
with mock_timestamp_now(last_modified):
broker.update_metadata(
{key: (json.dumps(params),
last_modified.internal)})
def get_context(id, broker):
data = broker.get_sharding_sysmeta().get('Context-%s' % id)
if data:
return CleavingContext(**json.loads(data))
return data
reclaim_age = 100
recon_sharded_timeout = 50
broker = self._make_broker()
# sanity check
self.assertIsNone(broker.get_own_shard_range(no_default=True))
self.assertEqual(UNSHARDED, broker.get_db_state())
# Set up some cleaving contexts
id_old, id_newish, id_complete = [str(uuid4()) for _ in range(3)]
ts_old, ts_newish, ts_complete = (
Timestamp(1),
Timestamp(reclaim_age // 2),
Timestamp(reclaim_age - recon_sharded_timeout))
contexts = ((id_old, ts_old, False),
(id_newish, ts_newish, False),
(id_complete, ts_complete, True))
for id, last_modified, cleaving_done in contexts:
add_cleave_context(id, last_modified, cleaving_done)
sharder_conf = {'reclaim_age': str(reclaim_age),
'recon_sharded_timeout': str(recon_sharded_timeout)}
with self._mock_sharder(sharder_conf) as sharder:
with mock_timestamp_now(Timestamp(reclaim_age + 2)):
sharder._audit_cleave_contexts(broker)
# old context is stale, i.e. its last-modified age reached reclaim_age
# and it was never completed (done).
old_ctx = get_context(id_old, broker)
self.assertEqual(old_ctx, "")
# Newish context is almost stale: it's been 1/2 reclaim_age since it
# was last modified but it's not completed, so it hasn't been
# cleaned up.
newish_ctx = get_context(id_newish, broker)
self.assertEqual(newish_ctx.ref, id_newish)
# Complete context is done and it's been recon_sharded_timeout since
# it was marked completed, so it has been removed
complete_ctx = get_context(id_complete, broker)
self.assertEqual(complete_ctx, "")
# If we push time another reclaim age later, they are all removed
with self._mock_sharder(sharder_conf) as sharder:
with mock_timestamp_now(Timestamp(reclaim_age * 2)):
sharder._audit_cleave_contexts(broker)
newish_ctx = get_context(id_newish, broker)
self.assertEqual(newish_ctx, "")
def test_shrinking_candidate_recon_dump(self):
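# Verify that shrinking candidates are reported in recon stats with
# their compactible range counts, and that the stats are updated as
# ranges are shrunk.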
conf = {'recon_cache_path': self.tempdir,
'devices': self.tempdir}
shard_bounds = (
('', 'd'), ('d', 'g'), ('g', 'l'), ('l', 'o'), ('o', 't'),
('t', 'x'), ('x', ''))
with self._mock_sharder(conf) as sharder:
brokers = []
shard_ranges = []
C1, C2, C3 = 0, 1, 2
for container in ('c1', 'c2', 'c3'):
broker = self._make_broker(
container=container, hash_=container + 'hash',
device=sharder.ring.devs[0]['device'], part=0)
broker.update_metadata({'X-Container-Sysmeta-Sharding':
('true', next(self.ts_iter).internal)})
my_sr = broker.get_own_shard_range()
my_sr.epoch = Timestamp.now()
broker.merge_shard_ranges([my_sr])
brokers.append(broker)
shard_ranges.append(self._make_shard_ranges(
shard_bounds, state=ShardRange.ACTIVE,
object_count=(
DEFAULT_SHARDER_CONF['shard_container_threshold'] / 2),
timestamp=next(self.ts_iter)))
# we want c2 to have 2 shrink pairs
shard_ranges[C2][1].object_count = 0
shard_ranges[C2][3].object_count = 0
brokers[C2].merge_shard_ranges(shard_ranges[C2])
brokers[C2].set_sharding_state()
brokers[C2].set_sharded_state()
# we want c1 to have the same, but one can't be shrunk
shard_ranges[C1][1].object_count = 0
shard_ranges[C1][2].object_count = \
DEFAULT_SHARDER_CONF['shard_container_threshold'] - 1
shard_ranges[C1][3].object_count = 0
brokers[C1].merge_shard_ranges(shard_ranges[C1])
brokers[C1].set_sharding_state()
brokers[C1].set_sharded_state()
# we want c3 to have more shrinkable donors than can be shrunk
# in one go.
shard_ranges[C3][0].object_count = 0
shard_ranges[C3][1].object_count = 0
shard_ranges[C3][2].object_count = 0
shard_ranges[C3][3].object_count = 0
shard_ranges[C3][4].object_count = 0
shard_ranges[C3][5].object_count = 0
brokers[C3].merge_shard_ranges(shard_ranges[C3])
brokers[C3].set_sharding_state()
brokers[C3].set_sharded_state()
node = {'ip': '10.0.0.0', 'replication_ip': '10.0.1.0',
'port': 1000, 'replication_port': 1100,
'device': 'sda', 'zone': 0, 'region': 0, 'id': 1,
'index': 0}
for broker in brokers:
sharder._identify_shrinking_candidate(broker, node)
sharder._report_stats()
expected_shrinking_candidates_data = {
'found': 3,
'top': [
{
'object_count': 500000,
'account': brokers[C3].account,
'meta_timestamp': mock.ANY,
'container': brokers[C3].container,
'file_size': os.stat(brokers[C3].db_file).st_size,
'path': brokers[C3].db_file,
'root': brokers[C3].path,
'node_index': 0,
'compactible_ranges': 3
}, {
'object_count': 2500000,
'account': brokers[C2].account,
'meta_timestamp': mock.ANY,
'container': brokers[C2].container,
'file_size': os.stat(brokers[1].db_file).st_size,
'path': brokers[C2].db_file,
'root': brokers[C2].path,
'node_index': 0,
'compactible_ranges': 2
}, {
'object_count': 2999999,
'account': brokers[C1].account,
'meta_timestamp': mock.ANY,
'container': brokers[C1].container,
'file_size': os.stat(brokers[C1].db_file).st_size,
'path': brokers[C1].db_file,
'root': brokers[C1].path,
'node_index': 0,
'compactible_ranges': 1
}
]}
self._assert_recon_stats(expected_shrinking_candidates_data,
sharder, 'shrinking_candidates')
# check shrinking stats are reset
sharder._zero_stats()
for broker in brokers:
sharder._identify_shrinking_candidate(broker, node)
sharder._report_stats()
self._assert_recon_stats(expected_shrinking_candidates_data,
sharder, 'shrinking_candidates')
# set some ranges to shrinking and check that stats are updated; in
# this case the container C2 no longer has any shrinkable ranges
# and no longer appears in stats
def shrink_actionable_ranges(broker):
compactible = find_compactible_shard_sequences(
broker, sharder.shrink_threshold, sharder.expansion_limit,
1, -1)
self.assertNotEqual([], compactible)
with mock_timestamp_now(next(self.ts_iter)):
process_compactible_shard_sequences(broker, compactible)
shrink_actionable_ranges(brokers[C2])
sharder._zero_stats()
for broker in brokers:
sharder._identify_shrinking_candidate(broker, node)
sharder._report_stats()
expected_shrinking_candidates_data = {
'found': 2,
'top': [
{
'object_count': mock.ANY,
'account': brokers[C3].account,
'meta_timestamp': mock.ANY,
'container': brokers[C3].container,
'file_size': os.stat(brokers[C3].db_file).st_size,
'path': brokers[C3].db_file,
'root': brokers[C3].path,
'node_index': 0,
'compactible_ranges': 3
}, {
'object_count': mock.ANY,
'account': brokers[C1].account,
'meta_timestamp': mock.ANY,
'container': brokers[C1].container,
'file_size': os.stat(brokers[C1].db_file).st_size,
'path': brokers[C1].db_file,
'root': brokers[C1].path,
'node_index': 0,
'compactible_ranges': 1
}
]}
self._assert_recon_stats(expected_shrinking_candidates_data,
sharder, 'shrinking_candidates')
# set some ranges to shrinking and check that stats are updated; in
# this case the container C3 no longer has any actionable ranges
# and no longer appears in stats
shrink_actionable_ranges(brokers[C3])
sharder._zero_stats()
for broker in brokers:
sharder._identify_shrinking_candidate(broker, node)
sharder._report_stats()
expected_shrinking_candidates_data = {
'found': 1,
'top': [
{
'object_count': mock.ANY,
'account': brokers[C1].account,
'meta_timestamp': mock.ANY,
'container': brokers[C1].container,
'file_size': os.stat(brokers[C1].db_file).st_size,
'path': brokers[C1].db_file,
'root': brokers[C1].path,
'node_index': 0,
'compactible_ranges': 1
}
]}
self._assert_recon_stats(expected_shrinking_candidates_data,
sharder, 'shrinking_candidates')
# set some ranges to shrunk in C3 so that other sequences become
# compactible
now = next(self.ts_iter)
shard_ranges = brokers[C3].get_shard_ranges()
for (donor, acceptor) in zip(shard_ranges, shard_ranges[1:]):
if donor.state == ShardRange.SHRINKING:
donor.update_state(ShardRange.SHRUNK, state_timestamp=now)
donor.set_deleted(timestamp=now)
acceptor.lower = donor.lower
acceptor.timestamp = now
brokers[C3].merge_shard_ranges(shard_ranges)
sharder._zero_stats()
for broker in brokers:
sharder._identify_shrinking_candidate(broker, node)
sharder._report_stats()
expected_shrinking_candidates_data = {
'found': 2,
'top': [
{
'object_count': mock.ANY,
'account': brokers[C3].account,
'meta_timestamp': mock.ANY,
'container': brokers[C3].container,
'file_size': os.stat(brokers[C3].db_file).st_size,
'path': brokers[C3].db_file,
'root': brokers[C3].path,
'node_index': 0,
'compactible_ranges': 2
}, {
'object_count': mock.ANY,
'account': brokers[C1].account,
'meta_timestamp': mock.ANY,
'container': brokers[C1].container,
'file_size': os.stat(brokers[C1].db_file).st_size,
'path': brokers[C1].db_file,
'root': brokers[C1].path,
'node_index': 0,
'compactible_ranges': 1
}
]}
self._assert_recon_stats(expected_shrinking_candidates_data,
sharder, 'shrinking_candidates')
@mock.patch('swift.common.ring.ring.Ring.get_part_nodes', return_value=[])
@mock.patch('swift.common.ring.ring.Ring.get_more_nodes', return_value=[])
def test_get_shard_broker_no_local_handoff_for_part(
self, mock_more_nodes, mock_part_nodes):
broker = self._make_broker()
broker.enable_sharding(Timestamp.now())
shard_bounds = (('', 'd'), ('d', 'x'), ('x', ''))
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.CREATED)
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.set_sharding_state())
# first, let's assume the local_handoff_for_part lookup fails because
# the node we're on is at zero weight for all disks. So it won't appear
# in the replica2part2dev table, meaning we won't get a node back.
# In this case, we'll fall back to one of our own devices, which we
# determine from ring.devs, not the replica2part2dev table.
with self._mock_sharder() as sharder:
local_dev_ids = {dev['id']: dev for dev in sharder.ring.devs[-1:]}
sharder._local_device_ids = local_dev_ids
part, shard_broker, node_id, _ = sharder._get_shard_broker(
shard_ranges[0], broker.root_path, 0)
self.assertIn(node_id, local_dev_ids)
# if there is more than one local_dev_id it'll randomly pick one
selected_node_ids = set()
for _ in range(10):
with self._mock_sharder() as sharder:
local_dev_ids = {dev['id']: dev
for dev in sharder.ring.devs[-2:]}
sharder._local_device_ids = local_dev_ids
part, shard_broker, node_id, _ = sharder._get_shard_broker(
shard_ranges[0], broker.root_path, 0)
self.assertIn(node_id, local_dev_ids)
selected_node_ids.add(node_id)
if len(selected_node_ids) == 2:
break
self.assertEqual(len(selected_node_ids), 2)
# If there are also no local_dev_ids, then we'll get the RuntimeError
with self._mock_sharder() as sharder:
sharder._local_device_ids = {}
with self.assertRaises(RuntimeError) as dev_err:
sharder._get_shard_broker(shard_ranges[0], broker.root_path, 0)
expected_error_string = 'Cannot find local handoff; no local devices'
self.assertEqual(str(dev_err.exception), expected_error_string)
class TestCleavingContext(BaseTestSharder):
def test_init(self):
ctx = CleavingContext(ref='test')
self.assertEqual('test', ctx.ref)
self.assertEqual('', ctx.cursor)
self.assertIsNone(ctx.max_row)
self.assertIsNone(ctx.cleave_to_row)
self.assertIsNone(ctx.last_cleave_to_row)
self.assertFalse(ctx.misplaced_done)
self.assertFalse(ctx.cleaving_done)
def test_iter(self):
ctx = CleavingContext('test', 'curs', 12, 11, 10, False, True, 0, 4)
expected = {'ref': 'test',
'cursor': 'curs',
'max_row': 12,
'cleave_to_row': 11,
'last_cleave_to_row': 10,
'cleaving_done': False,
'misplaced_done': True,
'ranges_done': 0,
'ranges_todo': 4}
self.assertEqual(expected, dict(ctx))
def test_cursor(self):
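# Verify that cursor values, including non-ascii, survive a store/load
# round trip.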
broker = self._make_broker()
ref = CleavingContext._make_ref(broker)
for curs in ('curs', u'curs\u00e4\u00fb'):
with annotate_failure('%r' % curs):
expected = curs.encode('utf-8') if six.PY2 else curs
ctx = CleavingContext(ref, curs, 12, 11, 10, False, True)
self.assertEqual(dict(ctx), {
'cursor': expected,
'max_row': 12,
'cleave_to_row': 11,
'last_cleave_to_row': 10,
'cleaving_done': False,
'misplaced_done': True,
'ranges_done': 0,
'ranges_todo': 0,
'ref': ref,
})
self.assertEqual(expected, ctx.cursor)
ctx.store(broker)
reloaded_ctx = CleavingContext.load(broker)
self.assertEqual(expected, reloaded_ctx.cursor)
# Since we reloaded, the max row gets updated from the broker
self.assertEqual(reloaded_ctx.max_row, -1)
# reset it so the dict comparison will succeed
reloaded_ctx.max_row = 12
self.assertEqual(dict(ctx), dict(reloaded_ctx))
def test_load(self):
broker = self._make_broker()
for i in range(6):
broker.put_object('o%s' % i, next(self.ts_iter).internal, 10,
'text/plain', 'etag_a', 0)
db_id = broker.get_info()['id']
params = {'ref': db_id,
'cursor': 'curs',
'max_row': 2,
'cleave_to_row': 2,
'last_cleave_to_row': 1,
'cleaving_done': False,
'misplaced_done': True,
'ranges_done': 2,
'ranges_todo': 4}
key = 'X-Container-Sysmeta-Shard-Context-%s' % db_id
broker.update_metadata(
{key: (json.dumps(params), Timestamp.now().internal)})
ctx = CleavingContext.load(broker)
self.assertEqual(db_id, ctx.ref)
self.assertEqual('curs', ctx.cursor)
# note max_row is dynamically updated during load
self.assertEqual(6, ctx.max_row)
self.assertEqual(2, ctx.cleave_to_row)
self.assertEqual(1, ctx.last_cleave_to_row)
self.assertTrue(ctx.misplaced_done)
self.assertFalse(ctx.cleaving_done)
self.assertEqual(2, ctx.ranges_done)
self.assertEqual(4, ctx.ranges_todo)
def test_load_all(self):
broker = self._make_broker()
last_ctx = None
timestamp = Timestamp.now()
db_ids = [str(uuid4()) for _ in range(6)]
for db_id in db_ids:
params = {'ref': db_id,
'cursor': 'curs',
'max_row': 2,
'cleave_to_row': 2,
'last_cleave_to_row': 1,
'cleaving_done': False,
'misplaced_done': True,
'ranges_done': 2,
'ranges_todo': 4}
key = 'X-Container-Sysmeta-Shard-Context-%s' % db_id
broker.update_metadata(
{key: (json.dumps(params), timestamp.internal)})
first_ctx = None
for ctx, lm in CleavingContext.load_all(broker):
if not first_ctx:
first_ctx = ctx
last_ctx = ctx
self.assertIn(ctx.ref, db_ids)
self.assertEqual(lm, timestamp.internal)
# If a context is deleted (metadata is "") then it's skipped
last_ctx.delete(broker)
db_ids.remove(last_ctx.ref)
# and let's modify the first
with mock_timestamp_now() as new_timestamp:
first_ctx.store(broker)
for ctx, lm in CleavingContext.load_all(broker):
self.assertIn(ctx.ref, db_ids)
if ctx.ref == first_ctx.ref:
self.assertEqual(lm, new_timestamp.internal)
else:
self.assertEqual(lm, timestamp.internal)
# delete all contexts
for ctx, lm in CleavingContext.load_all(broker):
ctx.delete(broker)
self.assertEqual([], CleavingContext.load_all(broker))
def test_delete(self):
broker = self._make_broker()
db_id = broker.get_info()['id']
params = {'ref': db_id,
'cursor': 'curs',
'max_row': 2,
'cleave_to_row': 2,
'last_cleave_to_row': 1,
'cleaving_done': False,
'misplaced_done': True,
'ranges_done': 2,
'ranges_todo': 4}
key = 'X-Container-Sysmeta-Shard-Context-%s' % db_id
broker.update_metadata(
{key: (json.dumps(params), Timestamp.now().internal)})
ctx = CleavingContext.load(broker)
self.assertEqual(db_id, ctx.ref)
# Now let's delete it. When deleted the metadata key will exist, but
# the value will be "" as this means it'll be reaped later.
ctx.delete(broker)
sysmeta = broker.get_sharding_sysmeta()
for key, val in sysmeta.items():
if key == "Context-%s" % db_id:
self.assertEqual(val, "")
break
else:
self.fail("Deleted context 'Context-%s' not found" % db_id)
def test_store_old_style(self):
broker = self._make_old_style_sharding_broker()
old_db_id = broker.get_brokers()[0].get_info()['id']
last_mod = Timestamp.now()
ctx = CleavingContext(old_db_id, 'curs', 12, 11, 2, True, True, 2, 4)
with mock_timestamp_now(last_mod):
ctx.store(broker)
key = 'X-Container-Sysmeta-Shard-Context-%s' % old_db_id
data = json.loads(broker.metadata[key][0])
expected = {'ref': old_db_id,
'cursor': 'curs',
'max_row': 12,
'cleave_to_row': 11,
'last_cleave_to_row': 2,
'cleaving_done': True,
'misplaced_done': True,
'ranges_done': 2,
'ranges_todo': 4}
self.assertEqual(expected, data)
# last modified is the metadata timestamp
self.assertEqual(broker.metadata[key][1], last_mod.internal)
def test_store_add_row_load_old_style(self):
# adding row to older db changes only max_row in the context
broker = self._make_old_style_sharding_broker()
old_broker = broker.get_brokers()[0]
old_db_id = old_broker.get_info()['id']
old_broker.merge_items([old_broker._record_to_dict(
('obj', next(self.ts_iter).internal, 0, 'text/plain', 'etag', 1))])
old_max_row = old_broker.get_max_row()
self.assertEqual(1, old_max_row) # sanity check
ctx = CleavingContext(old_db_id, 'curs', 1, 1, 0, True, True)
ctx.store(broker)
# adding a row changes max row
old_broker.merge_items([old_broker._record_to_dict(
('obj', next(self.ts_iter).internal, 0, 'text/plain', 'etag', 1))])
new_ctx = CleavingContext.load(broker)
self.assertEqual(old_db_id, new_ctx.ref)
self.assertEqual('curs', new_ctx.cursor)
self.assertEqual(2, new_ctx.max_row)
self.assertEqual(1, new_ctx.cleave_to_row)
self.assertEqual(0, new_ctx.last_cleave_to_row)
self.assertTrue(new_ctx.misplaced_done)
self.assertTrue(new_ctx.cleaving_done)
def test_store_reclaim_load_old_style(self):
# reclaiming rows from older db does not change context
broker = self._make_old_style_sharding_broker()
old_broker = broker.get_brokers()[0]
old_db_id = old_broker.get_info()['id']
old_broker.merge_items([old_broker._record_to_dict(
('obj', next(self.ts_iter).internal, 0, 'text/plain', 'etag', 1))])
old_max_row = old_broker.get_max_row()
self.assertEqual(1, old_max_row) # sanity check
ctx = CleavingContext(old_db_id, 'curs', 1, 1, 0, True, True)
ctx.store(broker)
self.assertEqual(
1, len(old_broker.get_objects()))
now = next(self.ts_iter).internal
broker.get_brokers()[0].reclaim(now, now)
self.assertFalse(old_broker.get_objects())
new_ctx = CleavingContext.load(broker)
self.assertEqual(old_db_id, new_ctx.ref)
self.assertEqual('curs', new_ctx.cursor)
self.assertEqual(1, new_ctx.max_row)
self.assertEqual(1, new_ctx.cleave_to_row)
self.assertEqual(0, new_ctx.last_cleave_to_row)
self.assertTrue(new_ctx.misplaced_done)
self.assertTrue(new_ctx.cleaving_done)
def test_store_modify_db_id_load_old_style(self):
# changing id changes ref, so results in a fresh context
broker = self._make_old_style_sharding_broker()
old_broker = broker.get_brokers()[0]
old_db_id = old_broker.get_info()['id']
ctx = CleavingContext(old_db_id, 'curs', 12, 11, 2, True, True)
ctx.store(broker)
old_broker.newid('fake_remote_id')
new_db_id = old_broker.get_info()['id']
self.assertNotEqual(old_db_id, new_db_id)
new_ctx = CleavingContext.load(broker)
self.assertEqual(new_db_id, new_ctx.ref)
self.assertEqual('', new_ctx.cursor)
# note max_row is dynamically updated during load
self.assertEqual(-1, new_ctx.max_row)
self.assertIsNone(new_ctx.cleave_to_row)
self.assertIsNone(new_ctx.last_cleave_to_row)
self.assertFalse(new_ctx.misplaced_done)
self.assertFalse(new_ctx.cleaving_done)
def test_load_modify_store_load_old_style(self):
broker = self._make_old_style_sharding_broker()
old_db_id = broker.get_brokers()[0].get_info()['id']
ctx = CleavingContext.load(broker)
self.assertEqual(old_db_id, ctx.ref)
self.assertEqual('', ctx.cursor) # sanity check
ctx.cursor = 'curs'
ctx.misplaced_done = True
ctx.store(broker)
ctx = CleavingContext.load(broker)
self.assertEqual(old_db_id, ctx.ref)
self.assertEqual('curs', ctx.cursor)
self.assertTrue(ctx.misplaced_done)
def test_store(self):
broker = self._make_sharding_broker()
old_db_id = broker.get_brokers()[0].get_info()['id']
last_mod = Timestamp.now()
ctx = CleavingContext(old_db_id, 'curs', 12, 11, 2, True, True, 2, 4)
with mock_timestamp_now(last_mod):
ctx.store(broker)
key = 'X-Container-Sysmeta-Shard-Context-%s' % old_db_id
data = json.loads(broker.metadata[key][0])
expected = {'ref': old_db_id,
'cursor': 'curs',
'max_row': 12,
'cleave_to_row': 11,
'last_cleave_to_row': 2,
'cleaving_done': True,
'misplaced_done': True,
'ranges_done': 2,
'ranges_todo': 4}
self.assertEqual(expected, data)
# last modified is the metadata timestamp
self.assertEqual(broker.metadata[key][1], last_mod.internal)
def test_store_add_row_load(self):
# adding row to older db changes only max_row in the context
broker = self._make_sharding_broker()
old_broker = broker.get_brokers()[0]
old_db_id = old_broker.get_info()['id']
old_broker.merge_items([old_broker._record_to_dict(
('obj', next(self.ts_iter).internal, 0, 'text/plain', 'etag', 1))])
old_max_row = old_broker.get_max_row()
self.assertEqual(1, old_max_row) # sanity check
ctx = CleavingContext(old_db_id, 'curs', 1, 1, 0, True, True)
ctx.store(broker)
# adding a row changes max row
old_broker.merge_items([old_broker._record_to_dict(
('obj', next(self.ts_iter).internal, 0, 'text/plain', 'etag', 1))])
new_ctx = CleavingContext.load(broker)
self.assertEqual(old_db_id, new_ctx.ref)
self.assertEqual('curs', new_ctx.cursor)
self.assertEqual(2, new_ctx.max_row)
self.assertEqual(1, new_ctx.cleave_to_row)
self.assertEqual(0, new_ctx.last_cleave_to_row)
self.assertTrue(new_ctx.misplaced_done)
self.assertTrue(new_ctx.cleaving_done)
def test_store_reclaim_load(self):
# reclaiming rows from older db does not change context
broker = self._make_sharding_broker()
old_broker = broker.get_brokers()[0]
old_db_id = old_broker.get_info()['id']
old_broker.merge_items([old_broker._record_to_dict(
('obj', next(self.ts_iter).internal, 0, 'text/plain', 'etag', 1))])
old_max_row = old_broker.get_max_row()
self.assertEqual(1, old_max_row) # sanity check
ctx = CleavingContext(old_db_id, 'curs', 1, 1, 0, True, True)
ctx.store(broker)
self.assertEqual(
1, len(old_broker.get_objects()))
now = next(self.ts_iter).internal
broker.get_brokers()[0].reclaim(now, now)
self.assertFalse(old_broker.get_objects())
new_ctx = CleavingContext.load(broker)
self.assertEqual(old_db_id, new_ctx.ref)
self.assertEqual('curs', new_ctx.cursor)
self.assertEqual(1, new_ctx.max_row)
self.assertEqual(1, new_ctx.cleave_to_row)
self.assertEqual(0, new_ctx.last_cleave_to_row)
self.assertTrue(new_ctx.misplaced_done)
self.assertTrue(new_ctx.cleaving_done)
def test_store_modify_db_id_load(self):
# changing id changes ref, so results in a fresh context
broker = self._make_sharding_broker()
old_broker = broker.get_brokers()[0]
old_db_id = old_broker.get_info()['id']
ctx = CleavingContext(old_db_id, 'curs', 12, 11, 2, True, True)
ctx.store(broker)
old_broker.newid('fake_remote_id')
new_db_id = old_broker.get_info()['id']
self.assertNotEqual(old_db_id, new_db_id)
new_ctx = CleavingContext.load(broker)
self.assertEqual(new_db_id, new_ctx.ref)
self.assertEqual('', new_ctx.cursor)
# note max_row is dynamically updated during load
self.assertEqual(-1, new_ctx.max_row)
self.assertIsNone(new_ctx.cleave_to_row)
self.assertIsNone(new_ctx.last_cleave_to_row)
self.assertFalse(new_ctx.misplaced_done)
self.assertFalse(new_ctx.cleaving_done)
def test_load_modify_store_load(self):
broker = self._make_sharding_broker()
old_db_id = broker.get_brokers()[0].get_info()['id']
ctx = CleavingContext.load(broker)
self.assertEqual(old_db_id, ctx.ref)
self.assertEqual('', ctx.cursor) # sanity check
ctx.cursor = 'curs'
ctx.misplaced_done = True
ctx.store(broker)
ctx = CleavingContext.load(broker)
self.assertEqual(old_db_id, ctx.ref)
self.assertEqual('curs', ctx.cursor)
self.assertTrue(ctx.misplaced_done)
def test_reset(self):
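# reset() clears the cursor, progress counters and done flags, and
# records the previous cleave_to_row as last_cleave_to_row.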
ctx = CleavingContext('test', 'curs', 12, 11, 2, True, True)
def check_context():
self.assertEqual('test', ctx.ref)
self.assertEqual('', ctx.cursor)
self.assertEqual(12, ctx.max_row)
self.assertEqual(11, ctx.cleave_to_row)
self.assertEqual(11, ctx.last_cleave_to_row)
self.assertFalse(ctx.misplaced_done)
self.assertFalse(ctx.cleaving_done)
self.assertEqual(0, ctx.ranges_done)
self.assertEqual(0, ctx.ranges_todo)
ctx.reset()
check_context()
# check idempotency
ctx.reset()
check_context()
def test_start(self):
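# start() clears the cursor, sets cleave_to_row to max_row and clears
# cleaving_done, but does not reset misplaced_done.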
ctx = CleavingContext('test', 'curs', 12, 11, 2, True, True)
def check_context():
self.assertEqual('test', ctx.ref)
self.assertEqual('', ctx.cursor)
self.assertEqual(12, ctx.max_row)
self.assertEqual(12, ctx.cleave_to_row)
self.assertEqual(2, ctx.last_cleave_to_row)
self.assertTrue(ctx.misplaced_done) # *not* reset here
self.assertFalse(ctx.cleaving_done)
self.assertEqual(0, ctx.ranges_done)
self.assertEqual(0, ctx.ranges_todo)
ctx.start()
check_context()
# check idempotency
ctx.start()
check_context()
def test_range_done(self):
ctx = CleavingContext('test', '', 12, 11, 2, True, True)
self.assertEqual(0, ctx.ranges_done)
self.assertEqual(0, ctx.ranges_todo)
self.assertEqual('', ctx.cursor)
ctx.ranges_todo = 5
ctx.range_done('b')
self.assertEqual(1, ctx.ranges_done)
self.assertEqual(4, ctx.ranges_todo)
self.assertEqual('b', ctx.cursor)
ctx.ranges_todo = 9
ctx.range_done('c')
self.assertEqual(2, ctx.ranges_done)
self.assertEqual(8, ctx.ranges_todo)
self.assertEqual('c', ctx.cursor)
def test_done(self):
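# done() requires misplaced_done, cleaving_done and cleave_to_row to
# have reached max_row.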
ctx = CleavingContext(
'test', '', max_row=12, cleave_to_row=12, last_cleave_to_row=2,
cleaving_done=True, misplaced_done=True)
self.assertTrue(ctx.done())
ctx = CleavingContext(
'test', '', max_row=12, cleave_to_row=11, last_cleave_to_row=2,
cleaving_done=True, misplaced_done=True)
self.assertFalse(ctx.done())
ctx = CleavingContext(
'test', '', max_row=12, cleave_to_row=12, last_cleave_to_row=2,
cleaving_done=True, misplaced_done=False)
self.assertFalse(ctx.done())
ctx = CleavingContext(
'test', '', max_row=12, cleave_to_row=12, last_cleave_to_row=2,
cleaving_done=False, misplaced_done=True)
self.assertFalse(ctx.done())
class TestSharderFunctions(BaseTestSharder):
def test_find_shrinking_candidates(self):
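# Verify that ranges below the shrink threshold are paired with their
# upper neighbour as acceptor, and that repeat calls return the same
# pairs.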
broker = self._make_broker()
shard_bounds = (('', 'a'), ('a', 'b'), ('b', 'c'), ('c', 'd'))
threshold = (DEFAULT_SHARDER_CONF['shrink_threshold'])
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.ACTIVE, object_count=threshold,
timestamp=next(self.ts_iter))
broker.merge_shard_ranges(shard_ranges)
pairs = find_shrinking_candidates(broker, threshold, threshold * 4)
self.assertEqual({}, pairs)
# one range just below threshold
shard_ranges[0].update_meta(threshold - 1, 0,
meta_timestamp=next(self.ts_iter))
broker.merge_shard_ranges(shard_ranges[0])
pairs = find_shrinking_candidates(broker, threshold, threshold * 4)
self.assertEqual(1, len(pairs), pairs)
for acceptor, donor in pairs.items():
self.assertEqual(shard_ranges[1], acceptor)
self.assertEqual(shard_ranges[0], donor)
# two ranges just below threshold
shard_ranges[2].update_meta(threshold - 1, 0,
meta_timestamp=next(self.ts_iter))
broker.merge_shard_ranges(shard_ranges[2])
pairs = find_shrinking_candidates(broker, threshold, threshold * 4)
# shenanigans to work around dicts with ShardRanges keys not comparing
def check_pairs(pairs):
acceptors = []
donors = []
for acceptor, donor in pairs.items():
acceptors.append(acceptor)
donors.append(donor)
acceptors.sort(key=ShardRange.sort_key)
donors.sort(key=ShardRange.sort_key)
self.assertEqual([shard_ranges[1], shard_ranges[3]], acceptors)
self.assertEqual([shard_ranges[0], shard_ranges[2]], donors)
check_pairs(pairs)
# repeat call after broker is updated and expect same pairs
shard_ranges[0].update_state(ShardRange.SHRINKING, next(self.ts_iter))
shard_ranges[2].update_state(ShardRange.SHRINKING, next(self.ts_iter))
shard_ranges[1].lower = shard_ranges[0].lower
shard_ranges[1].timestamp = next(self.ts_iter)
shard_ranges[3].lower = shard_ranges[2].lower
shard_ranges[3].timestamp = next(self.ts_iter)
broker.merge_shard_ranges(shard_ranges)
pairs = find_shrinking_candidates(broker, threshold, threshold * 4)
check_pairs(pairs)
def test_finalize_shrinking(self):
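# Verify that finalize_shrinking marks donors as SHRINKING (setting
# their epoch and state_timestamp) without updating the already-ACTIVE
# acceptor, and that repeat calls are idempotent.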
broker = self._make_broker()
broker.enable_sharding(next(self.ts_iter))
shard_bounds = (('', 'here'), ('here', 'there'), ('there', ''))
ts_0 = next(self.ts_iter)
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.ACTIVE, timestamp=ts_0)
self.assertTrue(broker.set_sharding_state())
self.assertTrue(broker.set_sharded_state())
ts_1 = next(self.ts_iter)
finalize_shrinking(broker, shard_ranges[2:], shard_ranges[:2], ts_1)
updated_ranges = broker.get_shard_ranges()
self.assertEqual(
[ShardRange.SHRINKING, ShardRange.SHRINKING, ShardRange.ACTIVE],
[sr.state for sr in updated_ranges]
)
# acceptor is not updated...
self.assertEqual(ts_0, updated_ranges[2].timestamp)
# donors are updated...
self.assertEqual([ts_1] * 2,
[sr.state_timestamp for sr in updated_ranges[:2]])
self.assertEqual([ts_1] * 2,
[sr.epoch for sr in updated_ranges[:2]])
# check idempotency
ts_2 = next(self.ts_iter)
finalize_shrinking(broker, shard_ranges[2:], shard_ranges[:2], ts_2)
updated_ranges = broker.get_shard_ranges()
self.assertEqual(
[ShardRange.SHRINKING, ShardRange.SHRINKING, ShardRange.ACTIVE],
[sr.state for sr in updated_ranges]
)
# acceptor is not updated...
self.assertEqual(ts_0, updated_ranges[2].timestamp)
# donors are not updated...
self.assertEqual([ts_1] * 2,
[sr.state_timestamp for sr in updated_ranges[:2]])
self.assertEqual([ts_1] * 2,
[sr.epoch for sr in updated_ranges[:2]])
def test_process_compactible(self):
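# Verify that process_compactible_shard_sequences expands acceptors as
# needed and passes the expected acceptors and donors to
# finalize_shrinking.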
# no sequences...
broker = self._make_broker()
with mock.patch('swift.container.sharder.finalize_shrinking') as fs:
with mock_timestamp_now(next(self.ts_iter)) as now:
process_compactible_shard_sequences(broker, [])
fs.assert_called_once_with(broker, [], [], now)
# two sequences with acceptor bounds needing to be updated
ts_0 = next(self.ts_iter)
sequence_1 = self._make_shard_ranges(
(('a', 'b'), ('b', 'c'), ('c', 'd')),
state=ShardRange.ACTIVE, timestamp=ts_0)
sequence_2 = self._make_shard_ranges(
(('x', 'y'), ('y', 'z')),
state=ShardRange.ACTIVE, timestamp=ts_0)
with mock.patch('swift.container.sharder.finalize_shrinking') as fs:
with mock_timestamp_now(next(self.ts_iter)) as now:
process_compactible_shard_sequences(
broker, [sequence_1, sequence_2])
expected_donors = sequence_1[:-1] + sequence_2[:-1]
expected_acceptors = [sequence_1[-1].copy(lower='a', timestamp=now),
sequence_2[-1].copy(lower='x', timestamp=now)]
fs.assert_called_once_with(
broker, expected_acceptors, expected_donors, now)
self.assertEqual([dict(sr) for sr in expected_acceptors],
[dict(sr) for sr in fs.call_args[0][1]])
self.assertEqual([dict(sr) for sr in expected_donors],
[dict(sr) for sr in fs.call_args[0][2]])
# sequences have already been processed - acceptors expanded
sequence_1 = self._make_shard_ranges(
(('a', 'b'), ('b', 'c'), ('a', 'd')),
state=ShardRange.ACTIVE, timestamp=ts_0)
sequence_2 = self._make_shard_ranges(
(('x', 'y'), ('x', 'z')),
state=ShardRange.ACTIVE, timestamp=ts_0)
with mock.patch('swift.container.sharder.finalize_shrinking') as fs:
with mock_timestamp_now(next(self.ts_iter)) as now:
process_compactible_shard_sequences(
broker, [sequence_1, sequence_2])
expected_donors = sequence_1[:-1] + sequence_2[:-1]
expected_acceptors = [sequence_1[-1], sequence_2[-1]]
fs.assert_called_once_with(
broker, expected_acceptors, expected_donors, now)
self.assertEqual([dict(sr) for sr in expected_acceptors],
[dict(sr) for sr in fs.call_args[0][1]])
self.assertEqual([dict(sr) for sr in expected_donors],
[dict(sr) for sr in fs.call_args[0][2]])
# acceptor is root - needs state to be updated, but not bounds
sequence_1 = self._make_shard_ranges(
(('a', 'b'), ('b', 'c'), ('a', 'd'), ('d', ''), ('', '')),
state=[ShardRange.ACTIVE] * 4 + [ShardRange.SHARDED],
timestamp=ts_0)
with mock.patch('swift.container.sharder.finalize_shrinking') as fs:
with mock_timestamp_now(next(self.ts_iter)) as now:
process_compactible_shard_sequences(broker, [sequence_1])
expected_donors = sequence_1[:-1]
expected_acceptors = [sequence_1[-1].copy(state=ShardRange.ACTIVE,
state_timestamp=now)]
fs.assert_called_once_with(
broker, expected_acceptors, expected_donors, now)
self.assertEqual([dict(sr) for sr in expected_acceptors],
[dict(sr) for sr in fs.call_args[0][1]])
self.assertEqual([dict(sr) for sr in expected_donors],
[dict(sr) for sr in fs.call_args[0][2]])
def test_find_compactible_shard_ranges_in_found_state(self):
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e'), ('e', 'f'),
('f', 'g'), ('g', 'h'), ('h', 'i'), ('i', 'j'), ('j', '')),
state=ShardRange.FOUND)
broker.merge_shard_ranges(shard_ranges)
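# the positional args after the broker are the shrink threshold, the
# expansion limit, max shrinking and max expanding (as exercised by the
# tests below); ranges still in the FOUND state are never shrinkable, so
# no compactible sequences are expected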
sequences = find_compactible_shard_sequences(broker, 10, 999, -1, -1)
self.assertEqual([], sequences)
def test_find_compactible_no_donors(self):
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e'), ('e', 'f'),
('f', 'g'), ('g', 'h'), ('h', 'i'), ('i', 'j'), ('j', '')),
state=ShardRange.ACTIVE, object_count=10)
broker.merge_shard_ranges(shard_ranges)
# shards exceed shrink threshold
sequences = find_compactible_shard_sequences(broker, 10, 999, -1, -1)
self.assertEqual([], sequences)
# compacted shards would exceed merge size
sequences = find_compactible_shard_sequences(broker, 11, 19, -1, -1)
self.assertEqual([], sequences)
# shards exceed merge size
sequences = find_compactible_shard_sequences(broker, 11, 9, -1, -1)
self.assertEqual([], sequences)
# shards exceed merge size and shrink threshold
sequences = find_compactible_shard_sequences(broker, 10, 9, -1, -1)
self.assertEqual([], sequences)
# shards exceed *zero'd* merge size and shrink threshold
sequences = find_compactible_shard_sequences(broker, 0, 0, -1, -1)
self.assertEqual([], sequences)
# shards exceed *negative* merge size and shrink threshold
sequences = find_compactible_shard_sequences(broker, -1, -2, -1, -1)
self.assertEqual([], sequences)
# weird case: shards' object count is less than the threshold but the
# compacted shards would exceed the merge size
sequences = find_compactible_shard_sequences(broker, 20, 19, -1, -1)
self.assertEqual([], sequences)
def test_find_compactible_nine_donors_one_acceptor(self):
# one sequence that spans entire namespace but does not shrink to root
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e'), ('e', 'f'),
('f', 'g'), ('g', 'h'), ('h', 'i'), ('i', 'j'), ('j', '')),
state=ShardRange.ACTIVE)
shard_ranges[9].object_count = 11 # final shard too big to shrink
broker.merge_shard_ranges(shard_ranges)
sequences = find_compactible_shard_sequences(broker, 10, 999, -1, -1)
self.assertEqual([shard_ranges], sequences)
def test_find_compactible_four_donors_two_acceptors(self):
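# indexes of the shard ranges left small enough to shrink; every other
# range gets its object count bumped above the shrink threshold below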
small_ranges = (2, 3, 4, 7)
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e'), ('e', 'f'),
('f', 'g'), ('g', 'h'), ('h', 'i'), ('i', 'j'), ('j', '')),
state=ShardRange.ACTIVE)
for i, sr in enumerate(shard_ranges):
if i not in small_ranges:
sr.object_count = 100
broker.merge_shard_ranges(shard_ranges)
sequences = find_compactible_shard_sequences(broker, 10, 999, -1, -1)
self.assertEqual([shard_ranges[2:6], shard_ranges[7:9]], sequences)
def test_find_compactible_all_donors_shrink_to_root(self):
# by default all shard ranges are small enough to shrink so the root
# becomes the acceptor
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e'), ('e', 'f'),
('f', 'g'), ('g', 'h'), ('h', 'i'), ('i', 'j'), ('j', '')),
state=ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges)
own_sr = broker.get_own_shard_range()
own_sr.update_state(ShardRange.SHARDED)
broker.merge_shard_ranges(own_sr)
sequences = find_compactible_shard_sequences(broker, 10, 999, -1, -1)
self.assertEqual([shard_ranges + [own_sr]], sequences)
def test_find_compactible_single_donor_shrink_to_root(self):
# single shard range small enough to shrink so the root becomes the
# acceptor
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('', ''),), state=ShardRange.ACTIVE, timestamp=next(self.ts_iter))
broker.merge_shard_ranges(shard_ranges)
own_sr = broker.get_own_shard_range()
own_sr.update_state(ShardRange.SHARDED, next(self.ts_iter))
broker.merge_shard_ranges(own_sr)
sequences = find_compactible_shard_sequences(broker, 10, 999, -1, -1)
self.assertEqual([shard_ranges + [own_sr]], sequences)
# update broker with donor/acceptor
shard_ranges[0].update_state(ShardRange.SHRINKING, next(self.ts_iter))
own_sr.update_state(ShardRange.ACTIVE, next(self.ts_iter))
broker.merge_shard_ranges([shard_ranges[0], own_sr])
# we don't find the same sequence again...
sequences = find_compactible_shard_sequences(broker, 10, 999, -1, -1)
self.assertEqual([], sequences)
# ...unless explicitly requesting it
sequences = find_compactible_shard_sequences(broker, 10, 999, -1, -1,
include_shrinking=True)
self.assertEqual([shard_ranges + [own_sr]], sequences)
def test_find_compactible_overlapping_ranges(self):
# unexpected case: all shrinkable, two overlapping sequences, one which
# spans entire namespace; should not shrink to root
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('', 'b'), ('b', 'c'), # overlaps form one sequence
('', 'j'), ('j', '')), # second sequence spans entire namespace
state=ShardRange.ACTIVE)
shard_ranges[1].object_count = 11 # cannot shrink, so becomes acceptor
broker.merge_shard_ranges(shard_ranges)
sequences = find_compactible_shard_sequences(broker, 10, 999, -1, -1)
self.assertEqual([shard_ranges[:2], shard_ranges[2:]], sequences)
def test_find_compactible_overlapping_ranges_with_ineligible_state(self):
# unexpected case: one ineligible state shard range overlapping one
# sequence which spans entire namespace; should not shrink to root
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('', 'b'), # overlap in ineligible state
('', 'j'), ('j', '')), # sequence spans entire namespace
state=[ShardRange.CREATED, ShardRange.ACTIVE, ShardRange.ACTIVE])
broker.merge_shard_ranges(shard_ranges)
sequences = find_compactible_shard_sequences(broker, 10, 999, -1, -1)
self.assertEqual([shard_ranges[1:]], sequences)
def test_find_compactible_donors_but_no_suitable_acceptor(self):
# if shard ranges are already shrinking, check that the final one is
# not made into an acceptor if a suitable adjacent acceptor is not
# found (unexpected scenario but possible in an overlap situation)
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e'), ('e', 'f'),
('f', 'g'), ('g', 'h'), ('h', 'i'), ('i', 'j'), ('j', '')),
state=([ShardRange.SHRINKING] * 3 +
[ShardRange.SHARDING] +
[ShardRange.ACTIVE] * 6))
broker.merge_shard_ranges(shard_ranges)
sequences = find_compactible_shard_sequences(broker, 10, 999, -1, -1)
self.assertEqual([shard_ranges[4:]], sequences)
def test_find_compactible_no_gaps(self):
# verify that compactible sequences do not include gaps
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('', 'b'), ('b', 'c'), ('c', 'd'), ('e', 'f'), # gap d - e
('f', 'g'), ('g', 'h'), ('h', 'i'), ('i', 'j'), ('j', '')),
state=ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges)
own_sr = broker.get_own_shard_range()
own_sr.update_state(ShardRange.SHARDED)
broker.merge_shard_ranges(own_sr)
sequences = find_compactible_shard_sequences(broker, 10, 999, -1, -1)
self.assertEqual([shard_ranges[:3], shard_ranges[3:]], sequences)
def test_find_compactible_eligible_states(self):
# verify that compactible sequences only include shards in valid states
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e'), ('e', 'f'),
('f', 'g'), ('g', 'h'), ('h', 'i'), ('i', 'j'), ('j', '')),
state=[ShardRange.SHRINKING, ShardRange.ACTIVE, # ok, shrinking
ShardRange.CREATED, # ineligible state
ShardRange.ACTIVE, ShardRange.ACTIVE, # ok
ShardRange.FOUND, # ineligible state
ShardRange.SHARDED, # ineligible state
ShardRange.ACTIVE, ShardRange.SHRINKING, # ineligible state
ShardRange.SHARDING, # ineligible state
])
broker.merge_shard_ranges(shard_ranges)
own_sr = broker.get_own_shard_range()
own_sr.update_state(ShardRange.SHARDED)
broker.merge_shard_ranges(own_sr)
sequences = find_compactible_shard_sequences(broker, 10, 999, -1, -1,
include_shrinking=True)
self.assertEqual([shard_ranges[:2], shard_ranges[3:5], ], sequences)
def test_find_compactible_max_shrinking(self):
# verify option to limit the number of shrinking shards per acceptor
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e'), ('e', 'f'),
('f', 'g'), ('g', 'h'), ('h', 'i'), ('i', 'j'), ('j', '')),
state=ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges)
# limit to 1 donor per acceptor
sequences = find_compactible_shard_sequences(broker, 10, 999, 1, -1)
self.assertEqual([shard_ranges[n:n + 2] for n in range(0, 9, 2)],
sequences)
def test_find_compactible_max_expanding(self):
# verify option to limit the number of expanding shards per acceptor
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e'), ('e', 'f'),
('f', 'g'), ('g', 'h'), ('h', 'i'), ('i', 'j'), ('j', '')),
state=ShardRange.ACTIVE)
broker.merge_shard_ranges(shard_ranges)
# note: max_shrinking is set to 3 so that there is opportunity for more
# than 2 acceptors
sequences = find_compactible_shard_sequences(broker, 10, 999, 3, 2)
self.assertEqual([shard_ranges[:4], shard_ranges[4:8]], sequences)
# relax max_expanding
sequences = find_compactible_shard_sequences(broker, 10, 999, 3, 3)
self.assertEqual(
[shard_ranges[:4], shard_ranges[4:8], shard_ranges[8:]], sequences)
# commit the first two sequences to the broker
for sr in shard_ranges[:3] + shard_ranges[4:7]:
sr.update_state(ShardRange.SHRINKING,
state_timestamp=next(self.ts_iter))
shard_ranges[3].lower = shard_ranges[0].lower
shard_ranges[3].timestamp = next(self.ts_iter)
shard_ranges[7].lower = shard_ranges[4].lower
shard_ranges[7].timestamp = next(self.ts_iter)
broker.merge_shard_ranges(shard_ranges)
# we don't find them again...
sequences = find_compactible_shard_sequences(broker, 10, 999, 3, 2)
self.assertEqual([], sequences)
# ...unless requested explicitly
sequences = find_compactible_shard_sequences(broker, 10, 999, 3, 2,
include_shrinking=True)
self.assertEqual([shard_ranges[:4], shard_ranges[4:8]], sequences)
# we could find another if max_expanding is increased
sequences = find_compactible_shard_sequences(broker, 10, 999, 3, 3)
self.assertEqual([shard_ranges[8:]], sequences)
def _do_test_find_compactible_shrink_threshold(self, broker, shard_ranges):
# verify option to set the shrink threshold for compaction;
# (n-2)th shard range has one extra object
shard_ranges[-2].object_count = 11
broker.merge_shard_ranges(shard_ranges)
# with threshold set to 10 no shard ranges can be shrunk
sequences = find_compactible_shard_sequences(broker, 10, 999, -1, -1)
self.assertEqual([], sequences)
# with threshold == 11 all but the final 2 shard ranges can be shrunk;
# note: the (n-1)th shard range is NOT shrunk to root
sequences = find_compactible_shard_sequences(broker, 11, 999, -1, -1)
self.assertEqual([shard_ranges[:9]], sequences)
def test_find_compactible_shrink_threshold(self):
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e'), ('e', 'f'),
('f', 'g'), ('g', 'h'), ('h', 'i'), ('i', 'j'), ('j', '')),
state=ShardRange.ACTIVE, object_count=10)
self._do_test_find_compactible_shrink_threshold(broker, shard_ranges)
def test_find_compactible_shrink_threshold_with_tombstones(self):
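# as above, but the row count compared against the shrink threshold is
# made up of objects plus tombstones (7 objects + 3 tombstones behaves
# like 10 objects)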
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e'), ('e', 'f'),
('f', 'g'), ('g', 'h'), ('h', 'i'), ('i', 'j'), ('j', '')),
state=ShardRange.ACTIVE, object_count=7, tombstones=3)
self._do_test_find_compactible_shrink_threshold(broker, shard_ranges)
def _do_test_find_compactible_expansion_limit(self, broker, shard_ranges):
# verify option to limit the size of each acceptor after compaction
broker.merge_shard_ranges(shard_ranges)
sequences = find_compactible_shard_sequences(broker, 10, 33, -1, -1)
self.assertEqual([shard_ranges[:5], shard_ranges[5:]], sequences)
shard_ranges[4].update_meta(20, 2000)
shard_ranges[6].update_meta(28, 2700)
broker.merge_shard_ranges(shard_ranges)
sequences = find_compactible_shard_sequences(broker, 10, 33, -1, -1)
self.assertEqual([shard_ranges[:4], shard_ranges[7:]], sequences)
def test_find_compactible_expansion_limit(self):
# verify option to limit the size of each acceptor after compaction
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e'), ('e', 'f'),
('f', 'g'), ('g', 'h'), ('h', 'i'), ('i', 'j'), ('j', '')),
state=ShardRange.ACTIVE, object_count=6)
self._do_test_find_compactible_expansion_limit(broker, shard_ranges)
def test_find_compactible_expansion_limit_with_tombstones(self):
# verify option to limit the size of each acceptor after compaction
broker = self._make_broker()
shard_ranges = self._make_shard_ranges(
(('', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e'), ('e', 'f'),
('f', 'g'), ('g', 'h'), ('h', 'i'), ('i', 'j'), ('j', '')),
state=ShardRange.ACTIVE, object_count=1, tombstones=5)
self._do_test_find_compactible_expansion_limit(broker, shard_ranges)
def test_is_sharding_candidate(self):
for state in ShardRange.STATES:
for object_count in (9, 10, 11):
sr = ShardRange('.shards_a/c', next(self.ts_iter), '', '',
state=state, object_count=object_count,
tombstones=100) # tombstones not considered
with annotate_failure('%s %s' % (state, object_count)):
if state == ShardRange.ACTIVE and object_count >= 10:
self.assertTrue(is_sharding_candidate(sr, 10))
else:
self.assertFalse(is_sharding_candidate(sr, 10))
def test_is_shrinking_candidate(self):
def do_check_true(state, ok_states):
# shard range has 9 objects
sr = ShardRange('.shards_a/c', next(self.ts_iter), '', '',
state=state, object_count=9)
self.assertTrue(is_shrinking_candidate(sr, 10, 9, ok_states))
# shard range has 9 rows
sr = ShardRange('.shards_a/c', next(self.ts_iter), '', '',
state=state, object_count=4, tombstones=5)
self.assertTrue(is_shrinking_candidate(sr, 10, 9, ok_states))
do_check_true(ShardRange.ACTIVE, (ShardRange.ACTIVE,))
do_check_true(ShardRange.ACTIVE,
(ShardRange.ACTIVE, ShardRange.SHRINKING))
do_check_true(ShardRange.SHRINKING,
(ShardRange.ACTIVE, ShardRange.SHRINKING))
def do_check_false(state, object_count, tombstones):
states = (ShardRange.ACTIVE, ShardRange.SHRINKING)
# shard range has the given object and tombstone counts
sr = ShardRange('.shards_a/c', next(self.ts_iter), '', '',
state=state, object_count=object_count,
tombstones=tombstones)
self.assertFalse(is_shrinking_candidate(sr, 10, 20))
self.assertFalse(is_shrinking_candidate(sr, 10, 20, states))
self.assertFalse(is_shrinking_candidate(sr, 10, 9))
self.assertFalse(is_shrinking_candidate(sr, 10, 9, states))
self.assertFalse(is_shrinking_candidate(sr, 20, 9))
self.assertFalse(is_shrinking_candidate(sr, 20, 9, states))
for state in ShardRange.STATES:
for object_count in (10, 11):
with annotate_failure('%s %s' % (state, object_count)):
do_check_false(state, object_count, 0)
for tombstones in (10, 11):
with annotate_failure('%s %s' % (state, tombstones)):
do_check_false(state, 0, tombstones)
for tombstones in (5, 6):
with annotate_failure('%s %s' % (state, tombstones)):
do_check_false(state, 5, tombstones)
def test_find_and_rank_whole_path_split(self):
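# find_paths() gathers the distinct paths of contiguous shard ranges that
# cover the namespace; rank_paths() then orders them, preferring paths
# with more objects, more cleaving progress and newer timestamps, relative
# to the given own shard range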
ts_0 = next(self.ts_iter)
ts_1 = next(self.ts_iter)
bounds_0 = (
('', 'f'),
('f', 'k'),
('k', 's'),
('s', 'x'),
('x', ''),
)
bounds_1 = (
('', 'g'),
('g', 'l'),
('l', 't'),
('t', 'y'),
('y', ''),
)
# path with newer timestamp wins
ranges_0 = self._make_shard_ranges(bounds_0, ShardRange.ACTIVE,
timestamp=ts_0)
ranges_1 = self._make_shard_ranges(bounds_1, ShardRange.ACTIVE,
timestamp=ts_1)
paths = find_paths(ranges_0 + ranges_1)
self.assertEqual(2, len(paths))
self.assertIn(ranges_0, paths)
self.assertIn(ranges_1, paths)
own_sr = ShardRange('a/c', Timestamp.now())
self.assertEqual(
[
ranges_1, # complete and newer timestamp
ranges_0, # complete
],
rank_paths(paths, own_sr))
# but object_count trumps matching timestamp
ranges_0 = self._make_shard_ranges(bounds_0, ShardRange.ACTIVE,
timestamp=ts_1, object_count=1)
paths = find_paths(ranges_0 + ranges_1)
self.assertEqual(2, len(paths))
self.assertIn(ranges_0, paths)
self.assertIn(ranges_1, paths)
self.assertEqual(
[
ranges_0, # complete with more objects
ranges_1, # complete
],
rank_paths(paths, own_sr))
def test_find_and_rank_two_sub_path_splits(self):
ts_0 = next(self.ts_iter)
ts_1 = next(self.ts_iter)
ts_2 = next(self.ts_iter)
bounds_0 = (
('', 'a'),
('a', 'm'),
('m', 'p'),
('p', 't'),
('t', 'x'),
('x', 'y'),
('y', ''),
)
bounds_1 = (
('a', 'g'), # split at 'a'
('g', 'l'),
('l', 'm'), # rejoin at 'm'
)
bounds_2 = (
('t', 'y'), # split at 't', rejoin at 'y'
)
ranges_0 = self._make_shard_ranges(bounds_0, ShardRange.ACTIVE,
timestamp=ts_0)
ranges_1 = self._make_shard_ranges(bounds_1, ShardRange.ACTIVE,
timestamp=ts_1, object_count=1)
ranges_2 = self._make_shard_ranges(bounds_2, ShardRange.ACTIVE,
timestamp=ts_2, object_count=1)
# all paths are complete
mix_path_0 = ranges_0[:1] + ranges_1 + ranges_0[2:] # 3 objects
mix_path_1 = ranges_0[:4] + ranges_2 + ranges_0[6:] # 1 object
mix_path_2 = (ranges_0[:1] + ranges_1 + ranges_0[2:4] + ranges_2 +
ranges_0[6:]) # 4 objects
paths = find_paths(ranges_0 + ranges_1 + ranges_2)
self.assertEqual(4, len(paths))
self.assertIn(ranges_0, paths)
self.assertIn(mix_path_0, paths)
self.assertIn(mix_path_1, paths)
self.assertIn(mix_path_2, paths)
own_sr = ShardRange('a/c', Timestamp.now())
self.assertEqual(
[
mix_path_2, # has 4 objects, 3 different timestamps
mix_path_0, # has 3 objects, 2 different timestamps
mix_path_1, # has 1 object, 2 different timestamps
ranges_0, # has 0 objects, 1 timestamp
],
rank_paths(paths, own_sr)
)
def test_find_and_rank_most_cleave_progress(self):
ts_0 = next(self.ts_iter)
ts_1 = next(self.ts_iter)
ts_2 = next(self.ts_iter)
bounds_0 = (
('', 'f'),
('f', 'k'),
('k', 'p'),
('p', '')
)
bounds_1 = (
('', 'g'),
('g', 'l'),
('l', 'q'),
('q', '')
)
bounds_2 = (
('', 'r'),
('r', '')
)
ranges_0 = self._make_shard_ranges(
bounds_0, [ShardRange.CLEAVED] * 3 + [ShardRange.CREATED],
timestamp=ts_1, object_count=1)
ranges_1 = self._make_shard_ranges(
bounds_1, [ShardRange.CLEAVED] * 4,
timestamp=ts_0)
ranges_2 = self._make_shard_ranges(
bounds_2, [ShardRange.CLEAVED, ShardRange.CREATED],
timestamp=ts_2, object_count=1)
paths = find_paths(ranges_0 + ranges_1 + ranges_2)
self.assertEqual(3, len(paths))
own_sr = ShardRange('a/c', Timestamp.now())
self.assertEqual(
[
ranges_1, # cleaved to end
ranges_2, # cleaved to r
ranges_0, # cleaved to p
],
rank_paths(paths, own_sr)
)
ranges_2 = self._make_shard_ranges(
bounds_2, [ShardRange.ACTIVE] * 2,
timestamp=ts_2, object_count=1)
paths = find_paths(ranges_0 + ranges_1 + ranges_2)
self.assertEqual(
[
ranges_2, # active to end, newer timestamp
ranges_1, # cleaved to r
ranges_0, # cleaved to p
],
rank_paths(paths, own_sr)
)
def test_find_and_rank_no_complete_path(self):
ts_0 = next(self.ts_iter)
ts_1 = next(self.ts_iter)
ts_2 = next(self.ts_iter)
bounds_0 = (
('', 'f'),
('f', 'k'),
('k', 'm'),
)
bounds_1 = (
('', 'g'),
('g', 'l'),
('l', 'n'),
)
bounds_2 = (
('', 'l'),
)
ranges_0 = self._make_shard_ranges(bounds_0, ShardRange.ACTIVE,
timestamp=ts_0)
ranges_1 = self._make_shard_ranges(bounds_1, ShardRange.ACTIVE,
timestamp=ts_1, object_count=1)
ranges_2 = self._make_shard_ranges(bounds_2, ShardRange.ACTIVE,
timestamp=ts_2, object_count=1)
mix_path_0 = ranges_2 + ranges_1[2:]
paths = find_paths(ranges_0 + ranges_1 + ranges_2)
self.assertEqual(3, len(paths))
self.assertIn(ranges_0, paths)
self.assertIn(ranges_1, paths)
self.assertIn(mix_path_0, paths)
own_sr = ShardRange('a/c', Timestamp.now())
self.assertEqual(
[
ranges_1, # cleaved to n, one timestamp
mix_path_0, # cleaved to n, has two different timestamps
ranges_0, # cleaved to m
],
rank_paths(paths, own_sr)
)
def test_find_paths_with_gaps(self):
bounds = (
# gap
('a', 'f'),
('f', 'm'), # overlap
('k', 'p'),
# gap
('q', 'y')
# gap
)
ranges = self._make_shard_ranges(
bounds, ShardRange.ACTIVE,
timestamp=next(self.ts_iter), object_count=1)
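# each reported path is a triple of (lower, upper) bounds: whatever lies
# before the gap (possibly a zero-length range at MIN or MAX), the gap
# itself expressed as a range, and the range that resumes after it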
paths_with_gaps = find_paths_with_gaps(ranges)
self.assertEqual(3, len(paths_with_gaps), paths_with_gaps)
self.assertEqual(
[(ShardRange.MIN, ShardRange.MIN),
(ShardRange.MIN, 'a'),
('a', 'm')],
[(r.lower, r.upper) for r in paths_with_gaps[0]]
)
self.assertEqual(
[('k', 'p'),
('p', 'q'),
('q', 'y')],
[(r.lower, r.upper) for r in paths_with_gaps[1]]
)
self.assertEqual(
[('q', 'y'),
('y', ShardRange.MAX),
(ShardRange.MAX, ShardRange.MAX)],
[(r.lower, r.upper) for r in paths_with_gaps[2]]
)
range_of_interest = ShardRange('test/range', next(self.ts_iter))
range_of_interest.lower = 'a'
paths_with_gaps = find_paths_with_gaps(ranges, range_of_interest)
self.assertEqual(2, len(paths_with_gaps), paths_with_gaps)
self.assertEqual(
[('k', 'p'),
('p', 'q'),
('q', 'y')],
[(r.lower, r.upper) for r in paths_with_gaps[0]]
)
self.assertEqual(
[('q', 'y'),
('y', ShardRange.MAX),
(ShardRange.MAX, ShardRange.MAX)],
[(r.lower, r.upper) for r in paths_with_gaps[1]]
)
range_of_interest.lower = 'b'
range_of_interest.upper = 'x'
paths_with_gaps = find_paths_with_gaps(ranges, range_of_interest)
self.assertEqual(1, len(paths_with_gaps), paths_with_gaps)
self.assertEqual(
[('k', 'p'),
('p', 'q'),
('q', 'y')],
[(r.lower, r.upper) for r in paths_with_gaps[0]]
)
range_of_interest.upper = 'c'
paths_with_gaps = find_paths_with_gaps(ranges, range_of_interest)
self.assertFalse(paths_with_gaps)
def test_find_overlapping_ranges(self):
now_ts = next(self.ts_iter)
past_ts = Timestamp(float(now_ts) - 61)
root_sr = ShardRange('a/c', past_ts, state=ShardRange.SHARDED)
bounds = (
('', 'a'),
('a', 'f'), # the 'parent_range' in this test.
('f', 'm'), # shard range overlaps with the next.
('k', 'p'),
('p', 'y'),
('y', '')
)
ranges = [
ShardRange(
ShardRange.make_path(
'.shards_a', 'c', root_sr.container, past_ts,
index),
past_ts, lower, upper, object_count=1,
state=ShardRange.SHARDED)
for index, (lower, upper) in enumerate(bounds)]
parent_range = ranges[1]
child_ranges = [
ShardRange(
ShardRange.make_path(
'.shards_a', 'c', parent_range.container, past_ts, 0),
past_ts, lower='a', upper='c', object_count=1,
state=ShardRange.CLEAVED),
ShardRange(
ShardRange.make_path(
'.shards_a', 'c', parent_range.container, past_ts, 1),
past_ts, lower='c', upper='f', object_count=1,
state=ShardRange.CLEAVED)]
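# the child ranges are created as shards of parent_range and together
# cover its namespace, so they overlap it unless parent-child overlaps
# are explicitly excluded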
overlapping_ranges = find_overlapping_ranges(ranges)
self.assertEqual({(ranges[2], ranges[3])}, overlapping_ranges)
overlapping_ranges = find_overlapping_ranges(
[ranges[1]] + child_ranges)
self.assertEqual(
{(child_ranges[0], child_ranges[1], ranges[1])},
overlapping_ranges)
overlapping_ranges = find_overlapping_ranges(
[ranges[1]] + child_ranges, exclude_parent_child=True)
self.assertEqual(0, len(overlapping_ranges))
with mock.patch(
'swift.container.sharder.time.time',
return_value=float(now_ts)):
overlapping_ranges = find_overlapping_ranges(
[ranges[1]] + child_ranges, exclude_parent_child=True,
time_period=61)
self.assertEqual(0, len(overlapping_ranges))
overlapping_ranges = find_overlapping_ranges(
[ranges[1]] + child_ranges, exclude_parent_child=True,
time_period=60)
self.assertEqual(
{(child_ranges[0], child_ranges[1], ranges[1])},
overlapping_ranges)
overlapping_ranges = find_overlapping_ranges(
ranges + child_ranges)
self.assertEqual(
{(child_ranges[0],
child_ranges[1],
ranges[1]),
(ranges[2],
ranges[3])},
overlapping_ranges)
overlapping_ranges = find_overlapping_ranges(
ranges + child_ranges, exclude_parent_child=True)
self.assertEqual({(ranges[2], ranges[3])}, overlapping_ranges)
with mock.patch(
'swift.container.sharder.time.time',
return_value=float(now_ts)):
overlapping_ranges = find_overlapping_ranges(
ranges + child_ranges, exclude_parent_child=True,
time_period=61)
self.assertEqual({(ranges[2], ranges[3])}, overlapping_ranges)
overlapping_ranges = find_overlapping_ranges(
ranges + child_ranges, exclude_parent_child=True,
time_period=60)
self.assertEqual(
{(child_ranges[0],
child_ranges[1],
ranges[1]),
(ranges[2],
ranges[3])},
overlapping_ranges)
def test_update_own_shard_range_stats(self):
broker = self._make_broker()
ts = next(self.ts_iter)
broker.merge_items([
{'name': 'obj%02d' % i, 'created_at': ts.internal, 'size': 9,
'content_type': 'application/octet-stream', 'etag': 'not-really',
'deleted': 0, 'storage_policy_index': 0,
'ctype_timestamp': ts.internal, 'meta_timestamp': ts.internal}
for i in range(100)])
self.assertEqual(100, broker.get_info()['object_count'])
self.assertEqual(900, broker.get_info()['bytes_used'])
own_sr = broker.get_own_shard_range()
self.assertEqual(0, own_sr.object_count)
self.assertEqual(0, own_sr.bytes_used)
# own_sr is updated...
update_own_shard_range_stats(broker, own_sr)
self.assertEqual(100, own_sr.object_count)
self.assertEqual(900, own_sr.bytes_used)
# ...but not persisted
own_sr = broker.get_own_shard_range()
self.assertEqual(0, own_sr.object_count)
self.assertEqual(0, own_sr.bytes_used)
class TestContainerSharderConf(unittest.TestCase):
def test_default(self):
expected = {'shard_container_threshold': 1000000,
'max_shrinking': 1,
'max_expanding': -1,
'shard_scanner_batch_size': 10,
'cleave_batch_size': 2,
'cleave_row_batch_size': 10000,
'broker_timeout': 60,
'recon_candidates_limit': 5,
'recon_sharded_timeout': 43200,
'container_sharding_timeout': 172800,
'conn_timeout': 5.0,
'auto_shard': False,
'shrink_threshold': 100000,
'expansion_limit': 750000,
'rows_per_shard': 500000,
'minimum_shard_size': 100000}
self.assertEqual(expected, vars(ContainerSharderConf()))
self.assertEqual(expected, vars(ContainerSharderConf(None)))
self.assertEqual(expected, DEFAULT_SHARDER_CONF)
def test_conf(self):
conf = {'shard_container_threshold': 2000000,
'max_shrinking': 2,
'max_expanding': 3,
'shard_scanner_batch_size': 11,
'cleave_batch_size': 4,
'cleave_row_batch_size': 50000,
'broker_timeout': 61,
'recon_candidates_limit': 6,
'recon_sharded_timeout': 43201,
'container_sharding_timeout': 172801,
'conn_timeout': 5.1,
'auto_shard': True,
'shrink_threshold': 100001,
'expansion_limit': 750001,
'rows_per_shard': 500001,
'minimum_shard_size': 20}
expected = dict(conf)
conf.update({'unexpected': 'option'})
self.assertEqual(expected, vars(ContainerSharderConf(conf)))
def test_deprecated_percent_conf(self):
base_conf = {'shard_container_threshold': 2000000,
'max_shrinking': 2,
'max_expanding': 3,
'shard_scanner_batch_size': 11,
'cleave_batch_size': 4,
'cleave_row_batch_size': 50000,
'broker_timeout': 61,
'recon_candidates_limit': 6,
'recon_sharded_timeout': 43201,
'container_sharding_timeout': 172801,
'conn_timeout': 5.1,
'auto_shard': True,
'minimum_shard_size': 1}
# percent options work
deprecated_conf = {'shard_shrink_point': 9,
'shard_shrink_merge_point': 71}
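# the percentages are applied to shard_container_threshold: 9% of 2000000
# gives the expected shrink_threshold of 180000 and 71% gives the
# expansion_limit of 1420000, while rows_per_shard is derived from the
# threshold (2000000 // 2 = 1000000)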
expected = dict(base_conf, rows_per_shard=1000000,
shrink_threshold=180000, expansion_limit=1420000)
conf = dict(base_conf)
conf.update(deprecated_conf)
self.assertEqual(expected, vars(ContainerSharderConf(conf)))
# check absolute options override percent options
conf.update({'shrink_threshold': 100001,
'expansion_limit': 750001})
expected = dict(base_conf, rows_per_shard=1000000,
shrink_threshold=100001, expansion_limit=750001)
conf.update(deprecated_conf)
self.assertEqual(expected, vars(ContainerSharderConf(conf)))
def test_bad_values(self):
not_positive_int = [0, -1, 'bad']
not_int = not_float = ['bad']
not_percent = ['bad', -1, 101, -0.1, 100.1]
bad = {'shard_container_threshold': not_positive_int,
'max_shrinking': not_int,
'max_expanding': not_int,
'shard_scanner_batch_size': not_positive_int,
'cleave_batch_size': not_positive_int,
'cleave_row_batch_size': not_positive_int,
'broker_timeout': not_positive_int,
'recon_candidates_limit': not_int,
'recon_sharded_timeout': not_int,
'conn_timeout': not_float,
# 'auto_shard': anything can be passed to config_true_value
'shrink_threshold': not_int,
'expansion_limit': not_int,
'shard_shrink_point': not_percent,
'shard_shrink_merge_point': not_percent,
'minimum_shard_size': not_positive_int}
for key, bad_values in bad.items():
for bad_value in bad_values:
with self.assertRaises(
ValueError, msg='{%s : %s}' % (key, bad_value)) as cm:
ContainerSharderConf({key: bad_value})
self.assertIn('Error setting %s' % key, str(cm.exception))
def test_validate(self):
def assert_bad(conf):
with self.assertRaises(ValueError):
ContainerSharderConf.validate_conf(ContainerSharderConf(conf))
def assert_ok(conf):
try:
ContainerSharderConf.validate_conf(ContainerSharderConf(conf))
except ValueError as err:
self.fail('Unexpected ValueError: %s' % err)
assert_ok({})
assert_ok({'minimum_shard_size': 100,
'shrink_threshold': 100,
'rows_per_shard': 100})
assert_bad({'minimum_shard_size': 100})
assert_bad({'shrink_threshold': 100001})
assert_ok({'minimum_shard_size': 100,
'shrink_threshold': 100})
assert_bad({'minimum_shard_size': 100,
'shrink_threshold': 100,
'rows_per_shard': 99})
assert_ok({'shard_container_threshold': 100,
'rows_per_shard': 99})
assert_bad({'shard_container_threshold': 100,
'rows_per_shard': 100})
assert_bad({'rows_per_shard': 10000001})
assert_ok({'shard_container_threshold': 100,
'expansion_limit': 99})
assert_bad({'shard_container_threshold': 100,
'expansion_limit': 100})
assert_bad({'expansion_limit': 100000001})
def test_validate_subset(self):
# verify that validation is only applied for keys that exist in the
# given namespace
def assert_bad(conf):
with self.assertRaises(ValueError):
ContainerSharderConf.validate_conf(Namespace(**conf))
def assert_ok(conf):
try:
ContainerSharderConf.validate_conf(Namespace(**conf))
except ValueError as err:
self.fail('Unexpected ValueError: %s' % err)
assert_ok({})
assert_ok({'minimum_shard_size': 100,
'shrink_threshold': 100,
'rows_per_shard': 100})
assert_ok({'minimum_shard_size': 100})
assert_ok({'shrink_threshold': 100001})
assert_ok({'minimum_shard_size': 100,
'shrink_threshold': 100})
assert_bad({'minimum_shard_size': 100,
'shrink_threshold': 100,
'rows_per_shard': 99})
assert_ok({'shard_container_threshold': 100,
'rows_per_shard': 99})
assert_bad({'shard_container_threshold': 100,
'rows_per_shard': 100})
assert_ok({'rows_per_shard': 10000001})
assert_ok({'shard_container_threshold': 100,
'expansion_limit': 99})
assert_bad({'shard_container_threshold': 100,
'expansion_limit': 100})
assert_ok({'expansion_limit': 100000001})
def test_combine_shard_ranges(self):
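# combine_shard_ranges() merges two lists of shard range dicts: for
# ranges with the same name the newer timestamp wins, state and meta
# follow their own state_timestamp/meta_timestamp, and ranges whose
# newest version is deleted are dropped from the result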
ts_iter = make_timestamp_iter()
this = ShardRange('a/o', next(ts_iter).internal)
that = ShardRange('a/o', next(ts_iter).internal)
actual = combine_shard_ranges([dict(this)], [dict(that)])
self.assertEqual([dict(that)], [dict(sr) for sr in actual])
actual = combine_shard_ranges([dict(that)], [dict(this)])
self.assertEqual([dict(that)], [dict(sr) for sr in actual])
ts = next(ts_iter).internal
this = ShardRange('a/o', ts, state=ShardRange.ACTIVE,
state_timestamp=next(ts_iter))
that = ShardRange('a/o', ts, state=ShardRange.CREATED,
state_timestamp=next(ts_iter))
actual = combine_shard_ranges([dict(this)], [dict(that)])
self.assertEqual([dict(that)], [dict(sr) for sr in actual])
actual = combine_shard_ranges([dict(that)], [dict(this)])
self.assertEqual([dict(that)], [dict(sr) for sr in actual])
that.update_meta(1, 2, meta_timestamp=next(ts_iter))
this.update_meta(3, 4, meta_timestamp=next(ts_iter))
expected = that.copy(object_count=this.object_count,
bytes_used=this.bytes_used,
meta_timestamp=this.meta_timestamp)
actual = combine_shard_ranges([dict(this)], [dict(that)])
self.assertEqual([dict(expected)], [dict(sr) for sr in actual])
actual = combine_shard_ranges([dict(that)], [dict(this)])
self.assertEqual([dict(expected)], [dict(sr) for sr in actual])
this = ShardRange('a/o', next(ts_iter).internal)
that = ShardRange('a/o', next(ts_iter).internal, deleted=True)
actual = combine_shard_ranges([dict(this)], [dict(that)])
self.assertFalse(actual, [dict(sr) for sr in actual])
actual = combine_shard_ranges([dict(that)], [dict(this)])
self.assertFalse(actual, [dict(sr) for sr in actual])
this = ShardRange('a/o', next(ts_iter).internal, deleted=True)
that = ShardRange('a/o', next(ts_iter).internal)
actual = combine_shard_ranges([dict(this)], [dict(that)])
self.assertEqual([dict(that)], [dict(sr) for sr in actual])
actual = combine_shard_ranges([dict(that)], [dict(this)])
self.assertEqual([dict(that)], [dict(sr) for sr in actual])
| swift-master | test/unit/container/test_sharder.py |
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import errno
import mock
import random
import logging
import unittest
import tempfile
from shutil import rmtree
from test.debug_logger import debug_logger
from swift.container.backend import DATADIR
from swift.container import sync_store
class FakeContainerBroker(object):
def __init__(self, path):
self.db_file = path
self.db_dir = os.path.dirname(path)
self.metadata = dict()
self._is_deleted = False
def is_deleted(self):
return self._is_deleted
class TestContainerSyncStore(unittest.TestCase):
def setUp(self):
self.logger = debug_logger('test-container-sync-store')
self.logger.level = logging.DEBUG
self.test_dir_prefix = tempfile.mkdtemp()
self.devices_dir = os.path.join(self.test_dir_prefix, 'srv/node/')
os.makedirs(self.devices_dir)
# Create dummy container dbs
self.devices = ['sdax', 'sdb', 'sdc']
self.partitions = ['21765', '38965', '13234']
self.suffixes = ['312', '435']
self.hashes = ['f19ed', '53ef', '0ab5', '9c3a']
for device in self.devices:
data_dir_path = os.path.join(self.devices_dir,
device,
DATADIR)
os.makedirs(data_dir_path)
for part in self.partitions:
for suffix in self.suffixes:
for hsh in self.hashes:
db_dir = os.path.join(data_dir_path,
part,
suffix,
hsh)
os.makedirs(db_dir)
db_file = os.path.join(db_dir, '%s.db' % hsh)
with open(db_file, 'w') as outfile:
outfile.write('%s' % db_file)
def tearDown(self):
rmtree(self.test_dir_prefix)
def pick_dbfile(self):
hsh = random.choice(self.hashes)
return os.path.join(self.devices_dir,
random.choice(self.devices),
DATADIR,
random.choice(self.partitions),
random.choice(self.suffixes),
hsh,
'%s.db' % hsh)
# Path conversion tests
# container path is of the form:
# /srv/node/sdb/containers/part/.../*.db
# or more generally:
# devices/device/DATADIR/part/.../*.db
# synced container path is assumed to be of the form:
# /srv/node/sdb/sync_containers/part/.../*.db
# or more generally:
# devices/device/SYNC_DATADIR/part/.../*.db
# Indeed the ONLY DIFFERENCE is DATADIR <-> SYNC_DATADIR
# Since, however, the strings represented by the constants
# DATADIR or SYNC_DATADIR
# can appear in the devices or the device part, the conversion
# function between the two is a bit more subtle than a mere replacement.
# This function tests the conversion between a container path
# and a synced container path
def test_container_to_synced_container_path_conversion(self):
# The conversion functions are oblivious to the suffix
# so we just pick a constant one.
db_path_suffix = self._db_path_suffix()
# We build various container path putting in both
# DATADIR and SYNC_DATADIR strings in the
# device and devices parts.
for devices, device in self._container_path_elements_generator():
path = os.path.join(devices, device, DATADIR, db_path_suffix)
# Call the conversion function
sds = sync_store.ContainerSyncStore(devices, self.logger, False)
path = sds._container_to_synced_container_path(path)
# Validate that ONLY the DATADIR part was replaced with
# sync_store.SYNC_DATADIR
self._validate_container_path_parts(path, devices, device,
sync_store.SYNC_DATADIR,
db_path_suffix)
# This function tests the conversion between a synced container path
# and a container path
def test_synced_container_to_container_path_conversion(self):
# The conversion functions are oblivious to the suffix
# so we just pick a constant one.
db_path_suffix = ('133791/625/82a7f5a2c43281b0eab3597e35bb9625/'
'82a7f5a2c43281b0eab3597e35bb9625.db')
# We build various synced container path putting in both
# DATADIR and SYNC_DATADIR strings in the
# device and devices parts.
for devices, device in self._container_path_elements_generator():
path = os.path.join(devices, device,
sync_store.SYNC_DATADIR, db_path_suffix)
# Call the conversion function
sds = sync_store.ContainerSyncStore(devices, self.logger, False)
path = sds._synced_container_to_container_path(path)
# Validate that ONLY the SYNC_DATADIR part was replaced with
# DATADIR
self._validate_container_path_parts(path, devices, device,
DATADIR,
db_path_suffix)
# Constructs a db path suffix of the form:
# 133791/625/82...25/82...25.db
def _db_path_suffix(self):
def random_hexa_string(length):
return '%0x' % random.randrange(16 ** length)
db = random_hexa_string(32)
return '%s/%s/%s/%s.db' % (random_hexa_string(5),
random_hexa_string(3),
db, db)
def _container_path_elements_generator(self):
# We build various container path elements putting in both
# DATADIR and SYNC_DATADIR strings in the
# device and devices parts.
for devices in ['/srv/node', '/srv/node/',
'/srv/node/dev',
'/srv/node/%s' % DATADIR,
'/srv/node/%s' % sync_store.SYNC_DATADIR]:
for device in ['sdf1', 'sdf1/sdf2',
'sdf1/%s' % DATADIR,
'sdf1/%s' % sync_store.SYNC_DATADIR,
'%s/sda' % DATADIR,
'%s/sda' % sync_store.SYNC_DATADIR]:
yield devices, device
def _validate_container_path_parts(self, path, devices,
device, target, suffix):
# Recall that the path is of the form:
# devices/device/target/suffix
# where each of the sub path elements (e.g. devices)
# has a path structure containing path elements separated by '/'
# We thus validate by splitting the path according to '/'
# traversing all of its path elements making sure that the
# first elements are those of devices,
# the second are those of device
# etc.
spath = path.split('/')
spath.reverse()
self.assertEqual(spath.pop(), '')
# Validate path against 'devices'
for p in [p for p in devices.split('/') if p]:
self.assertEqual(spath.pop(), p)
# Validate path against 'device'
for p in [p for p in device.split('/') if p]:
self.assertEqual(spath.pop(), p)
# Validate path against target
self.assertEqual(spath.pop(), target)
# Validate path against suffix
for p in [p for p in suffix.split('/') if p]:
self.assertEqual(spath.pop(), p)
def test_add_synced_container(self):
# Add non-existing and existing synced containers
sds = sync_store.ContainerSyncStore(self.devices_dir,
self.logger,
False)
cfile = self.pick_dbfile()
broker = FakeContainerBroker(cfile)
for i in range(2):
sds.add_synced_container(broker)
scpath = sds._container_to_synced_container_path(cfile)
with open(scpath, 'r') as infile:
self.assertEqual(infile.read(), cfile)
iterated_synced_containers = list()
for db_path in sds.synced_containers_generator():
iterated_synced_containers.append(db_path)
self.assertEqual(len(iterated_synced_containers), 1)
def test_remove_synced_container(self):
# Add a synced container to remove
sds = sync_store.ContainerSyncStore(self.devices_dir,
self.logger,
False)
cfile = self.pick_dbfile()
# We keep the link file here so as to validate its deletion later
lfile = sds._container_to_synced_container_path(cfile)
broker = FakeContainerBroker(cfile)
sds.add_synced_container(broker)
# Remove existing and non-existing synced containers
for i in range(2):
sds.remove_synced_container(broker)
iterated_synced_containers = list()
for db_path in sds.synced_containers_generator():
iterated_synced_containers.append(db_path)
self.assertEqual(len(iterated_synced_containers), 0)
# Make sure the whole link path gets deleted
# recall that the path has the following suffix:
# <hexa string of length 6>/<hexa string of length 3>/
# <hexa string of length 32>/<same 32 hexa string>.db
# and we expect the .db as well as all path elements
# to get deleted
self.assertFalse(os.path.exists(lfile))
lfile = os.path.dirname(lfile)
for i in range(3):
self.assertFalse(os.path.exists(os.path.dirname(lfile)))
lfile = os.path.dirname(lfile)
def test_iterate_synced_containers(self):
# populate sync container db
sds = sync_store.ContainerSyncStore(self.devices_dir,
self.logger,
False)
containers = list()
for i in range(10):
cfile = self.pick_dbfile()
broker = FakeContainerBroker(cfile)
sds.add_synced_container(broker)
containers.append(cfile)
iterated_synced_containers = list()
for db_path in sds.synced_containers_generator():
iterated_synced_containers.append(db_path)
self.assertEqual(
set(containers), set(iterated_synced_containers))
def test_unhandled_exceptions_in_add_remove(self):
sds = sync_store.ContainerSyncStore(self.devices_dir,
self.logger,
False)
cfile = self.pick_dbfile()
broker = FakeContainerBroker(cfile)
with mock.patch(
'swift.container.sync_store.os.stat',
side_effect=OSError(errno.EPERM, 'permission denied')):
with self.assertRaises(OSError) as cm:
sds.add_synced_container(broker)
self.assertEqual(errno.EPERM, cm.exception.errno)
with mock.patch(
'swift.container.sync_store.os.makedirs',
side_effect=OSError(errno.EPERM, 'permission denied')):
with self.assertRaises(OSError) as cm:
sds.add_synced_container(broker)
self.assertEqual(errno.EPERM, cm.exception.errno)
with mock.patch(
'swift.container.sync_store.os.symlink',
side_effect=OSError(errno.EPERM, 'permission denied')):
with self.assertRaises(OSError) as cm:
sds.add_synced_container(broker)
self.assertEqual(errno.EPERM, cm.exception.errno)
with mock.patch(
'swift.container.sync_store.os.unlink',
side_effect=OSError(errno.EPERM, 'permission denied')):
with self.assertRaises(OSError) as cm:
sds.remove_synced_container(broker)
self.assertEqual(errno.EPERM, cm.exception.errno)
def test_update_sync_store_according_to_metadata_and_deleted(self):
# This function tests the update_sync_store 'logics'
# with respect to various combinations of the
# sync-to and sync-key metadata items and whether
# the database is marked for delete.
# The table below summarizes the expected result
# for the various combinations, e.g.:
# If metadata items exist and the database
# is not marked for delete then add should be called.
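# Each row of results_list below is (is_deleted, sync-to value,
# sync-key value, expected call), where the expected call is one of
# 'add', 'remove' or 'none'.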
results_list = [
[False, 'a', 'b', 'add'],
[False, 'a', '', 'remove'],
[False, 'a', None, 'remove'],
[False, '', 'b', 'remove'],
[False, '', '', 'remove'],
[False, '', None, 'remove'],
[False, None, 'b', 'remove'],
[False, None, '', 'remove'],
[False, None, None, 'none'],
[True, 'a', 'b', 'remove'],
[True, 'a', '', 'remove'],
[True, 'a', None, 'remove'],
[True, '', 'b', 'remove'],
[True, '', '', 'remove'],
[True, '', None, 'remove'],
[True, None, 'b', 'remove'],
[True, None, '', 'remove'],
[True, None, None, 'none'],
]
store = 'swift.container.sync_store.ContainerSyncStore'
with mock.patch(store + '.add_synced_container') as add_container:
with mock.patch(
store + '.remove_synced_container') as remove_container:
sds = sync_store.ContainerSyncStore(self.devices_dir,
self.logger,
False)
add_calls = 0
remove_calls = 0
# We now iterate over the list of combinations, validating that
# add and remove are called as expected
for deleted, sync_to, sync_key, expected_op in results_list:
cfile = self.pick_dbfile()
broker = FakeContainerBroker(cfile)
broker._is_deleted = deleted
if sync_to is not None:
broker.metadata['X-Container-Sync-To'] = [
sync_to, 1]
if sync_key is not None:
broker.metadata['X-Container-Sync-Key'] = [
sync_key, 1]
sds.update_sync_store(broker)
if expected_op == 'add':
add_calls += 1
if expected_op == 'remove':
remove_calls += 1
self.assertEqual(add_container.call_count,
add_calls)
self.assertEqual(remove_container.call_count,
remove_calls)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/container/test_sync_store.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
import os
from swift.common.utils import normalize_timestamp
from swift.container import auditor
from test.debug_logger import debug_logger
from test.unit import with_tempdir
from test.unit.container import test_backend
class TestAuditorMigrations(unittest.TestCase):
@with_tempdir
@mock.patch('swift.common.db_auditor.dump_recon_cache')
def test_db_migration(self, tempdir, mock_recon):
db_path = os.path.join(tempdir, 'sda', 'containers', '0', '0', '0',
'test.db')
with test_backend.TestContainerBrokerBeforeSPI.old_broker() as \
old_ContainerBroker:
broker = old_ContainerBroker(db_path, account='a', container='c')
broker.initialize(normalize_timestamp(0), -1)
with broker.get() as conn:
try:
conn.execute('SELECT storage_policy_index '
'FROM container_stat')
except Exception as err:
self.assertTrue('no such column: storage_policy_index' in
str(err))
else:
self.fail('TestContainerBrokerBeforeSPI broker class '
'was already migrated')
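# running the auditor once should migrate the old schema so that the
# storage_policy_index column is available afterwards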
conf = {'devices': tempdir, 'mount_check': False}
test_auditor = auditor.ContainerAuditor(conf, logger=debug_logger())
test_auditor.run_once()
broker = auditor.ContainerBroker(db_path, account='a', container='c')
info = broker.get_info()
expected = {
'account': 'a',
'container': 'c',
'object_count': 0,
'bytes_used': 0,
'storage_policy_index': 0,
}
for k, v in expected.items():
self.assertEqual(info[k], v)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/container/test_auditor.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
import os
import posix
import mock
import unittest
import itertools
import time
import random
from contextlib import contextmanager
from io import BytesIO
from shutil import rmtree
from tempfile import mkdtemp
from xml.dom import minidom
from eventlet import spawn, Timeout
import json
import six
from six import StringIO
from six.moves.urllib.parse import quote
from swift import __version__ as swift_version
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import (Request, WsgiBytesIO, HTTPNoContent,
bytes_to_wsgi)
import swift.container
from swift.container import server as container_server
from swift.common import constraints
from swift.common.utils import (Timestamp, mkdirs, public, replication,
storage_directory, lock_parent_directory,
ShardRange, RESERVED_STR)
from test.debug_logger import debug_logger
from test.unit import fake_http_connect, mock_check_drive
from swift.common.storage_policy import (POLICIES, StoragePolicy)
from swift.common.request_helpers import get_sys_meta_prefix, get_reserved_name
from test import listen_zero, annotate_failure
from test.unit import patch_policies, make_timestamp_iter, mock_timestamp_now
@contextmanager
def save_globals():
orig_http_connect = getattr(swift.container.server, 'http_connect',
None)
try:
yield True
finally:
swift.container.server.http_connect = orig_http_connect
@patch_policies
class TestContainerController(unittest.TestCase):
def setUp(self):
self.testdir = os.path.join(
mkdtemp(), 'tmp_test_container_server_ContainerController')
mkdirs(self.testdir)
rmtree(self.testdir)
mkdirs(os.path.join(self.testdir, 'sda1'))
mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
self.logger = debug_logger()
self.controller = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false'},
logger=self.logger)
# some of the policy tests want at least two policies
self.assertTrue(len(POLICIES) > 1)
self.ts = make_timestamp_iter()
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
def _update_object_put_headers(self, req):
"""
Override this method in test subclasses to test post upgrade
behavior.
"""
pass
def _put_shard_range(self, shard_range):
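# helper: PUT a single shard range to the container server as a backend
# 'shard' record; either 201 or 202 (e.g. when the db already exists)
# is accepted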
put_timestamp = shard_range.timestamp.internal
headers = {'X-Backend-Record-Type': 'shard',
'X-Timestamp': put_timestamp}
body = json.dumps([dict(shard_range)])
req = Request.blank('/sda1/p/a/c', method='PUT', headers=headers,
body=body)
resp = req.get_response(self.controller)
self.assertIn(resp.status_int, (201, 202))
def _check_put_container_storage_policy(self, req, policy_index):
resp = req.get_response(self.controller)
self.assertEqual(201, resp.status_int)
req = Request.blank(req.path, method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(204, resp.status_int)
self.assertEqual(str(policy_index),
resp.headers['X-Backend-Storage-Policy-Index'])
def _assert_shard_ranges_equal(self, x, y):
# ShardRange.__eq__ only compares lower and upper; here we generate
# dict representations to compare all attributes
self.assertEqual([dict(sr) for sr in x], [dict(sr) for sr in y])
def test_creation(self):
# later config should be extended to assert more config options
app = container_server.ContainerController(
{'node_timeout': '3.5'}, logger=self.logger)
self.assertEqual(app.node_timeout, 3.5)
self.assertEqual(self.logger.get_lines_for_level('warning'), [])
app = container_server.ContainerController(
{'auto_create_account_prefix': '-'}, logger=self.logger)
self.assertEqual(self.logger.get_lines_for_level('warning'), [
'Option auto_create_account_prefix is deprecated. '
'Configure auto_create_account_prefix under the '
'swift-constraints section of swift.conf. This option '
'will be ignored in a future release.'
])
def test_get_and_validate_policy_index(self):
# no policy is OK
req = Request.blank('/sda1/p/a/container_default', method='PUT',
headers={'X-Timestamp': '0'})
self._check_put_container_storage_policy(req, POLICIES.default.idx)
# bogus policies
for policy in ('nada', 999):
req = Request.blank('/sda1/p/a/c_%s' % policy, method='PUT',
headers={
'X-Timestamp': '0',
'X-Backend-Storage-Policy-Index': policy
})
resp = req.get_response(self.controller)
self.assertEqual(400, resp.status_int)
self.assertIn(b'invalid', resp.body.lower())
# good policies
for policy in POLICIES:
req = Request.blank('/sda1/p/a/c_%s' % policy.name, method='PUT',
headers={
'X-Timestamp': '0',
'X-Backend-Storage-Policy-Index':
policy.idx,
})
self._check_put_container_storage_policy(req, policy.idx)
def test_acl_container(self):
# Ensure no acl by default
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '0'})
resp = req.get_response(self.controller)
self.assertTrue(resp.status.startswith('201'))
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assertTrue(response.status.startswith('204'))
self.assertNotIn('x-container-read', response.headers)
self.assertNotIn('x-container-write', response.headers)
# Ensure POSTing acls works
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': '1', 'X-Container-Read': '.r:*',
'X-Container-Write': 'account:user'})
resp = req.get_response(self.controller)
self.assertTrue(resp.status.startswith('204'))
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assertTrue(response.status.startswith('204'))
self.assertEqual(response.headers.get('x-container-read'), '.r:*')
self.assertEqual(response.headers.get('x-container-write'),
'account:user')
# Ensure we can clear acls on POST
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': '3', 'X-Container-Read': '',
'X-Container-Write': ''})
resp = req.get_response(self.controller)
self.assertTrue(resp.status.startswith('204'))
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assertTrue(response.status.startswith('204'))
self.assertNotIn('x-container-read', response.headers)
self.assertNotIn('x-container-write', response.headers)
# Ensure PUT acls works
req = Request.blank(
'/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '4', 'X-Container-Read': '.r:*',
'X-Container-Write': 'account:user'})
resp = req.get_response(self.controller)
self.assertTrue(resp.status.startswith('201'))
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assertTrue(response.status.startswith('204'))
self.assertEqual(response.headers.get('x-container-read'), '.r:*')
self.assertEqual(response.headers.get('x-container-write'),
'account:user')
def _test_head(self, start, ts):
req = Request.blank('/sda1/p/a/c', method='HEAD')
response = req.get_response(self.controller)
self.assertEqual(response.status_int, 204)
self.assertEqual(response.headers['x-container-bytes-used'], '0')
self.assertEqual(response.headers['x-container-object-count'], '0')
obj_put_request = Request.blank(
'/sda1/p/a/c/o', method='PUT', headers={
'x-timestamp': next(ts),
'x-size': 42,
'x-content-type': 'text/plain',
'x-etag': 'x',
})
self._update_object_put_headers(obj_put_request)
obj_put_resp = obj_put_request.get_response(self.controller)
self.assertEqual(obj_put_resp.status_int // 100, 2)
# re-issue HEAD request
response = req.get_response(self.controller)
self.assertEqual(response.status_int // 100, 2)
self.assertEqual(response.headers['x-container-bytes-used'], '42')
self.assertEqual(response.headers['x-container-object-count'], '1')
# created at time...
created_at_header = Timestamp(response.headers['x-timestamp'])
self.assertEqual(response.headers['x-timestamp'],
created_at_header.normal)
self.assertTrue(created_at_header >= start)
self.assertEqual(response.headers['x-put-timestamp'],
Timestamp(start).normal)
time_fmt = "%a, %d %b %Y %H:%M:%S GMT"
self.assertEqual(
response.last_modified.strftime(time_fmt),
time.strftime(time_fmt, time.gmtime(int(start))))
# backend headers
        self.assertEqual(
            int(response.headers['X-Backend-Storage-Policy-Index']),
            int(POLICIES.default))
self.assertTrue(
Timestamp(response.headers['x-backend-timestamp']) >= start)
self.assertEqual(response.headers['x-backend-put-timestamp'],
Timestamp(start).internal)
self.assertEqual(response.headers['x-backend-delete-timestamp'],
Timestamp(0).internal)
self.assertEqual(response.headers['x-backend-status-changed-at'],
Timestamp(start).internal)
def test_HEAD(self):
start = int(time.time())
ts = (Timestamp(t).internal for t in itertools.count(start))
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'x-timestamp': next(ts)})
req.get_response(self.controller)
self._test_head(Timestamp(start), ts)
def test_HEAD_timestamp_with_offset(self):
start = int(time.time())
ts = (Timestamp(t, offset=1).internal for t in itertools.count(start))
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'x-timestamp': next(ts)})
req.get_response(self.controller)
self._test_head(Timestamp(start, offset=1), ts)
def test_HEAD_not_found(self):
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(int(resp.headers['X-Backend-Storage-Policy-Index']),
0)
self.assertEqual(resp.headers['x-backend-timestamp'],
Timestamp(0).internal)
self.assertEqual(resp.headers['x-backend-put-timestamp'],
Timestamp(0).internal)
self.assertEqual(resp.headers['x-backend-status-changed-at'],
Timestamp(0).internal)
self.assertEqual(resp.headers['x-backend-delete-timestamp'],
Timestamp(0).internal)
self.assertIsNone(resp.last_modified)
for header in ('x-container-object-count', 'x-container-bytes-used',
'x-timestamp', 'x-put-timestamp'):
self.assertIsNone(resp.headers[header])
def test_deleted_headers(self):
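        # A deleted container should 404 on GET/HEAD but still report its
        # backend put/delete/status-changed-at timestamps.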
request_method_times = {
'PUT': next(self.ts).internal,
'DELETE': next(self.ts).internal,
}
# setup a deleted container
for method in ('PUT', 'DELETE'):
x_timestamp = request_method_times[method]
req = Request.blank('/sda1/p/a/c', method=method,
headers={'x-timestamp': x_timestamp})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2)
for method in ('GET', 'HEAD'):
req = Request.blank('/sda1/p/a/c', method=method)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertIsNone(resp.last_modified)
# backend headers
            self.assertEqual(
                int(resp.headers['X-Backend-Storage-Policy-Index']),
                int(POLICIES.default))
self.assertTrue(Timestamp(resp.headers['x-backend-timestamp']) >=
Timestamp(request_method_times['PUT']))
self.assertEqual(resp.headers['x-backend-put-timestamp'],
request_method_times['PUT'])
self.assertEqual(resp.headers['x-backend-delete-timestamp'],
request_method_times['DELETE'])
self.assertEqual(resp.headers['x-backend-status-changed-at'],
request_method_times['DELETE'])
for header in ('x-container-object-count',
'x-container-bytes-used', 'x-timestamp',
'x-put-timestamp'):
self.assertIsNone(resp.headers[header])
def test_HEAD_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_HEAD_invalid_content_type(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'},
headers={'Accept': 'application/plain'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 406)
def test_HEAD_invalid_accept(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'},
headers={'Accept': 'application/plain;q'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
self.assertEqual(resp.body, b'')
def test_HEAD_invalid_format(self):
        format = '%D1%BD%8A9'  # invalid UTF-8 (%D1 should be %E1)
req = Request.blank(
'/sda1/p/a/c?format=' + format,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_OPTIONS(self):
server_handler = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false'})
req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = server_handler.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(sorted(resp.headers['Allow'].split(', ')), sorted(
'OPTIONS GET POST PUT DELETE HEAD REPLICATE UPDATE'.split()))
self.assertEqual(resp.headers['Server'],
(self.controller.server_type + '/' + swift_version))
def test_insufficient_storage_mount_check_true(self):
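        # Every backend verb except OPTIONS should 507 while the device
        # looks unmounted, and stop returning 507 once the mount check
        # passes.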
conf = {'devices': self.testdir, 'mount_check': 'true'}
container_controller = container_server.ContainerController(conf)
self.assertTrue(container_controller.mount_check)
for method in container_controller.allowed_methods:
if method == 'OPTIONS':
continue
path = '/sda1/p/'
if method == 'REPLICATE':
path += 'suff'
else:
path += 'a/c'
req = Request.blank(path, method=method,
headers={'x-timestamp': '1'})
with mock_check_drive() as mocks:
try:
resp = req.get_response(container_controller)
self.assertEqual(resp.status_int, 507)
mocks['ismount'].return_value = True
resp = req.get_response(container_controller)
self.assertNotEqual(resp.status_int, 507)
# feel free to rip out this last assertion...
expected = 2 if method == 'PUT' else 4
self.assertEqual(resp.status_int // 100, expected)
except AssertionError as e:
self.fail('%s for %s' % (e, method))
def test_insufficient_storage_mount_check_false(self):
conf = {'devices': self.testdir, 'mount_check': 'false'}
container_controller = container_server.ContainerController(conf)
self.assertFalse(container_controller.mount_check)
for method in container_controller.allowed_methods:
if method == 'OPTIONS':
continue
path = '/sda1/p/'
if method == 'REPLICATE':
path += 'suff'
else:
path += 'a/c'
req = Request.blank(path, method=method,
headers={'x-timestamp': '1'})
with mock_check_drive() as mocks:
try:
resp = req.get_response(container_controller)
self.assertEqual(resp.status_int, 507)
mocks['isdir'].return_value = True
resp = req.get_response(container_controller)
self.assertNotEqual(resp.status_int, 507)
# feel free to rip out this last assertion...
expected = 2 if method == 'PUT' else 4
self.assertEqual(resp.status_int // 100, expected)
except AssertionError as e:
self.fail('%s for %s' % (e, method))
def test_PUT(self):
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '2'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
def test_PUT_HEAD_put_timestamp_updates(self):
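        # The put timestamp advances on PUTs to the container path (even
        # with x-backend-no-timestamp-update), but not for shard-record
        # PUTs or PUTs to an object path.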
put_ts = Timestamp(1)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': put_ts.internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
def do_put_head(put_ts, meta_value, extra_hdrs, body='', path='a/c'):
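            # Issue a PUT with the given metadata and extra headers, then
            # HEAD the container and return the response headers.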
# Set metadata header
req = Request.blank('/sda1/p/' + path,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': put_ts.internal,
'X-Container-Meta-Test': meta_value},
body=body)
req.headers.update(extra_hdrs)
resp = req.get_response(self.controller)
self.assertTrue(resp.is_success)
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
return resp.headers
# put timestamp is advanced on PUT with container path
put_ts = Timestamp(2)
resp_hdrs = do_put_head(put_ts, 'val1',
{'x-backend-no-timestamp-update': 'false'})
self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val1')
self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
put_ts.internal)
self.assertEqual(resp_hdrs.get('x-put-timestamp'), put_ts.internal)
put_ts = Timestamp(3)
resp_hdrs = do_put_head(put_ts, 'val2',
{'x-backend-no-timestamp-update': 'true'})
self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val2')
self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
put_ts.internal)
self.assertEqual(resp_hdrs.get('x-put-timestamp'), put_ts.internal)
# put timestamp is NOT updated if record type is shard
put_ts = Timestamp(4)
resp_hdrs = do_put_head(
put_ts, 'val3', {'x-backend-record-type': 'shard'},
body=json.dumps([dict(ShardRange('x/y', 123.4))]))
self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val3')
self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
Timestamp(3).internal)
self.assertEqual(resp_hdrs.get('x-put-timestamp'),
Timestamp(3).internal)
# put timestamp and metadata are NOT updated for request with obj path
put_ts = Timestamp(5)
resp_hdrs = do_put_head(
put_ts, 'val4',
{'x-content-type': 'plain/text', 'x-size': 0, 'x-etag': 'an-etag'},
path='a/c/o')
self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val3')
self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
Timestamp(3).internal)
self.assertEqual(resp_hdrs.get('x-put-timestamp'),
Timestamp(3).internal)
def test_PUT_insufficient_space(self):
conf = {'devices': self.testdir,
'mount_check': 'false',
'fallocate_reserve': '2%'}
container_controller = container_server.ContainerController(conf)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '1517617825.74832'})
statvfs_result = posix.statvfs_result([
4096, # f_bsize
4096, # f_frsize
2854907, # f_blocks
59000, # f_bfree
57000, # f_bavail (just under 2% free)
1280000, # f_files
1266040, # f_ffree,
1266040, # f_favail,
4096, # f_flag
255, # f_namemax
])
with mock.patch('os.statvfs',
return_value=statvfs_result) as mock_statvfs:
resp = req.get_response(container_controller)
self.assertEqual(resp.status_int, 507)
self.assertEqual(mock_statvfs.mock_calls,
[mock.call(os.path.join(self.testdir, 'sda1'))])
def test_PUT_simulated_create_race(self):
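        # Simulate losing a create race: the broker reports the db file as
        # missing, but initialize() then finds it already created, and the
        # PUT should still return 202.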
state = ['initial']
from swift.container.backend import ContainerBroker as OrigCoBr
class InterceptedCoBr(OrigCoBr):
def __init__(self, *args, **kwargs):
super(InterceptedCoBr, self).__init__(*args, **kwargs)
if state[0] == 'initial':
# Do nothing initially
pass
elif state[0] == 'race':
# Save the original db_file attribute value
self._saved_db_file = self.db_file
self._db_file += '.doesnotexist'
def initialize(self, *args, **kwargs):
if state[0] == 'initial':
# Do nothing initially
pass
elif state[0] == 'race':
# Restore the original db_file attribute to get the race
# behavior
self._db_file = self._saved_db_file
return super(InterceptedCoBr, self).initialize(*args, **kwargs)
with mock.patch("swift.container.server.ContainerBroker",
InterceptedCoBr):
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
state[0] = "race"
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
def test_PUT_obj_not_found(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '1', 'X-Size': '0',
'X-Content-Type': 'text/plain', 'X-ETag': 'e'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
def test_PUT_good_policy_specified(self):
policy = random.choice(list(POLICIES))
# Set metadata header
req = Request.blank('/sda1/p/a/c', method='PUT',
headers={'X-Timestamp': Timestamp(1).internal,
'X-Backend-Storage-Policy-Index':
policy.idx})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
# now make sure we read it back
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
def test_PUT_no_policy_specified(self):
# Set metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(POLICIES.default.idx))
        # now make sure the default was used
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(POLICIES.default.idx))
def test_PUT_bad_policy_specified(self):
# Set metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal,
'X-Backend-Storage-Policy-Index': 'nada'})
resp = req.get_response(self.controller)
# make sure we get bad response
self.assertEqual(resp.status_int, 400)
self.assertFalse('X-Backend-Storage-Policy-Index' in resp.headers)
def test_PUT_no_policy_change(self):
policy = random.choice(list(POLICIES))
# Set metadata header
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': next(self.ts).internal,
'X-Backend-Storage-Policy-Index': policy.idx})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
# make sure we get the right index back
self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
# now try to update w/o changing the policy
for method in ('POST', 'PUT'):
req = Request.blank('/sda1/p/a/c', method=method, headers={
'X-Timestamp': next(self.ts).internal,
'X-Backend-Storage-Policy-Index': policy.idx
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2)
# make sure we get the right index back
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
def test_PUT_bad_policy_change(self):
policy = random.choice(list(POLICIES))
# Set metadata header
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': next(self.ts).internal,
'X-Backend-Storage-Policy-Index': policy.idx})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
# make sure we get the right index back
self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
other_policies = [p for p in POLICIES if p != policy]
for other_policy in other_policies:
# now try to change it and make sure we get a conflict
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': next(self.ts).internal,
'X-Backend-Storage-Policy-Index': other_policy.idx
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 409)
self.assertEqual(
resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
# and make sure there is no change!
req = Request.blank('/sda1/p/a/c')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
# make sure we get the right index back
self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
def test_POST_ignores_policy_change(self):
policy = random.choice(list(POLICIES))
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': next(self.ts).internal,
'X-Backend-Storage-Policy-Index': policy.idx})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
# make sure we get the right index back
self.assertEqual(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
other_policies = [p for p in POLICIES if p != policy]
for other_policy in other_policies:
# now try to change it and make sure we get a conflict
req = Request.blank('/sda1/p/a/c', method='POST', headers={
'X-Timestamp': next(self.ts).internal,
'X-Backend-Storage-Policy-Index': other_policy.idx
})
resp = req.get_response(self.controller)
# valid request
self.assertEqual(resp.status_int // 100, 2)
# but it does nothing
req = Request.blank('/sda1/p/a/c')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
# make sure we get the right index back
            self.assertEqual(
                resp.headers.get('X-Backend-Storage-Policy-Index'),
                str(policy.idx))
def test_PUT_no_policy_for_existing_default(self):
# create a container with the default storage policy
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': next(self.ts).internal,
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity check
# check the policy index
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
str(POLICIES.default.idx))
# put again without specifying the storage policy
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': next(self.ts).internal,
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202) # sanity check
# policy index is unchanged
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
str(POLICIES.default.idx))
def test_PUT_proxy_default_no_policy_for_existing_default(self):
# make it look like the proxy has a different default than we do, like
# during a config change restart across a multi node cluster.
proxy_default = random.choice([p for p in POLICIES if not
p.is_default])
# create a container with the default storage policy
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': next(self.ts).internal,
'X-Backend-Storage-Policy-Default': int(proxy_default),
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity check
# check the policy index
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(int(resp.headers['X-Backend-Storage-Policy-Index']),
int(proxy_default))
# put again without proxy specifying the different default
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': next(self.ts).internal,
'X-Backend-Storage-Policy-Default': int(POLICIES.default),
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202) # sanity check
# policy index is unchanged
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(int(resp.headers['X-Backend-Storage-Policy-Index']),
int(proxy_default))
def test_PUT_no_policy_for_existing_non_default(self):
non_default_policy = [p for p in POLICIES if not p.is_default][0]
# create a container with the non-default storage policy
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': next(self.ts).internal,
'X-Backend-Storage-Policy-Index': non_default_policy.idx,
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity check
# check the policy index
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
str(non_default_policy.idx))
# put again without specifying the storage policy
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': next(self.ts).internal,
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202) # sanity check
# policy index is unchanged
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
str(non_default_policy.idx))
def test_create_reserved_namespace_container(self):
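        # Containers named with the reserved-namespace prefix can be
        # created through the backend API.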
path = '/sda1/p/a/%sc' % RESERVED_STR
req = Request.blank(path, method='PUT', headers={
'X-Timestamp': next(self.ts).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status, '201 Created', resp.body)
path = '/sda1/p/a/%sc%stest' % (RESERVED_STR, RESERVED_STR)
req = Request.blank(path, method='PUT', headers={
'X-Timestamp': next(self.ts).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status, '201 Created', resp.body)
def test_create_reserved_object_in_container(self):
# create container
path = '/sda1/p/a/c/'
req = Request.blank(path, method='PUT', headers={
'X-Timestamp': next(self.ts).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# put null object in it
path += '%so' % RESERVED_STR
req = Request.blank(path, method='PUT', headers={
'X-Timestamp': next(self.ts).internal,
'X-Size': 0,
'X-Content-Type': 'application/x-test',
'X-Etag': 'x',
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status, '400 Bad Request')
self.assertEqual(resp.body, b'Invalid reserved-namespace object '
b'in user-namespace container')
def test_PUT_non_utf8_metadata(self):
# Set metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal,
'X-Container-Meta-Test': b'\xff'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
# Set sysmeta header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal,
'X-Container-Sysmeta-Test': b'\xff'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
# Set ACL
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal,
'X-Container-Read': b'\xff'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
# Send other
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal,
'X-Will-Not-Be-Saved': b'\xff'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
def test_PUT_GET_metadata(self):
# Set metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal,
'X-Container-Meta-Test': 'Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-container-meta-test'), 'Value')
# Set another metadata header, ensuring old one doesn't disappear
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(1).internal,
'X-Container-Meta-Test2': 'Value2'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-container-meta-test'), 'Value')
self.assertEqual(resp.headers.get('x-container-meta-test2'), 'Value2')
# Update metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(3).internal,
'X-Container-Meta-Test': 'New Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-container-meta-test'),
'New Value')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(2).internal,
'X-Container-Meta-Test': 'Old Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-container-meta-test'),
'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(4).internal,
'X-Container-Meta-Test': ''})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertNotIn('x-container-meta-test', resp.headers)
def test_PUT_GET_sys_metadata(self):
prefix = get_sys_meta_prefix('container')
key = '%sTest' % prefix
key2 = '%sTest2' % prefix
# Set metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal,
key: 'Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(key.lower()), 'Value')
# Set another metadata header, ensuring old one doesn't disappear
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(1).internal,
key2: 'Value2'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(key.lower()), 'Value')
self.assertEqual(resp.headers.get(key2.lower()), 'Value2')
# Update metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(3).internal,
key: 'New Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(key.lower()),
'New Value')
# Send old update to metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(2).internal,
key: 'Old Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(key.lower()),
'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(4).internal,
key: ''})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertNotIn(key.lower(), resp.headers)
def test_PUT_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_timestamp_not_float(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_POST_HEAD_metadata(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# Set metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(1).internal,
'X-Container-Meta-Test': 'Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-container-meta-test'), 'Value')
self.assertEqual(resp.headers.get('x-put-timestamp'),
'0000000001.00000')
# Update metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(3).internal,
'X-Container-Meta-Test': 'New Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-container-meta-test'),
'New Value')
self.assertEqual(resp.headers.get('x-put-timestamp'),
'0000000003.00000')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(2).internal,
'X-Container-Meta-Test': 'Old Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-container-meta-test'),
'New Value')
self.assertEqual(resp.headers.get('x-put-timestamp'),
'0000000003.00000')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(4).internal,
'X-Container-Meta-Test': ''})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertNotIn('x-container-meta-test', resp.headers)
self.assertEqual(resp.headers.get('x-put-timestamp'),
'0000000004.00000')
def test_POST_HEAD_sys_metadata(self):
prefix = get_sys_meta_prefix('container')
key = '%sTest' % prefix
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# Set metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(1).internal,
key: 'Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(key.lower()), 'Value')
# Update metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(3).internal,
key: 'New Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(key.lower()),
'New Value')
# Send old update to metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(2).internal,
key: 'Old Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(key.lower()),
'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(4).internal,
key: ''})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertNotIn(key.lower(), resp.headers)
def test_POST_HEAD_no_timestamp_update(self):
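        # POSTs normally advance the backend put timestamp; with
        # x-backend-no-timestamp-update: true newer metadata is still
        # applied but the timestamp stays put, and it never goes backwards.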
put_ts = Timestamp(1)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': put_ts.internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
def do_post_head(post_ts, value, extra_hdrs):
# Set metadata header
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': post_ts.internal,
'X-Container-Meta-Test': value})
req.headers.update(extra_hdrs)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
return resp.headers
# verify timestamp IS advanced
post_ts = Timestamp(2)
resp_hdrs = do_post_head(post_ts, 'val1', {})
self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val1')
self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
post_ts.internal)
post_ts = Timestamp(3)
resp_hdrs = do_post_head(post_ts, 'val2',
{'x-backend-no-timestamp-update': 'false'})
self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val2')
self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
post_ts.internal)
# verify timestamp IS NOT advanced, but metadata still updated
post_ts = Timestamp(4)
resp_hdrs = do_post_head(post_ts, 'val3',
{'x-backend-No-timeStamp-update': 'true'})
self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val3')
self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
Timestamp(3).internal)
# verify timestamp will not go backwards
post_ts = Timestamp(2)
resp_hdrs = do_post_head(post_ts, 'val4',
{'x-backend-no-timestamp-update': 'true'})
self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val3')
self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
Timestamp(3).internal)
def test_POST_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_POST_insufficient_space(self):
conf = {'devices': self.testdir,
'mount_check': 'false',
'fallocate_reserve': '2%'}
container_controller = container_server.ContainerController(conf)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': '1517618035.469202'})
statvfs_result = posix.statvfs_result([
4096, # f_bsize
4096, # f_frsize
2854907, # f_blocks
59000, # f_bfree
57000, # f_bavail (just under 2% free)
1280000, # f_files
1266040, # f_ffree,
1266040, # f_favail,
4096, # f_flag
255, # f_namemax
])
with mock.patch('os.statvfs',
return_value=statvfs_result) as mock_statvfs:
resp = req.get_response(container_controller)
self.assertEqual(resp.status_int, 507)
self.assertEqual(mock_statvfs.mock_calls,
[mock.call(os.path.join(self.testdir, 'sda1'))])
def test_POST_timestamp_not_float(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_POST_invalid_container_sync_to(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'},
headers={'x-container-sync-to': '192.168.0.1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_POST_after_DELETE_not_found(self):
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c/',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': '3'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
def test_DELETE_obj_not_found(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
def test_DELETE_container_not_found(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
def test_PUT_utf8(self):
snowman = u'\u2603'
container_name = snowman.encode('utf-8')
req = Request.blank(
'/sda1/p/a/%s' % container_name,
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
def test_account_update_mismatched_host_device(self):
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'},
headers={'X-Timestamp': '0000000001.00000',
'X-Account-Host': '127.0.0.1:0',
'X-Account-Partition': '123',
'X-Account-Device': 'sda1,sda2'})
broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
resp = self.controller.account_update(req, 'a', 'c', broker)
self.assertEqual(resp.status_int, 400)
def test_account_update_account_override_deleted(self):
bindsock = listen_zero()
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'},
headers={'X-Timestamp': '0000000001.00000',
'X-Account-Host': '%s:%s' %
bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1',
'X-Account-Override-Deleted': 'yes'})
with save_globals():
new_connect = fake_http_connect(200, count=123)
swift.container.server.http_connect = new_connect
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
def test_PUT_account_update(self):
bindsock = listen_zero()
def accept(return_code, expected_timestamp):
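            # Minimal fake account server: accept one connection, reply
            # with the given status, and check the account update's request
            # line and x-put-timestamp header; returns any error raised.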
if not isinstance(expected_timestamp, bytes):
expected_timestamp = expected_timestamp.encode('ascii')
try:
with Timeout(3):
sock, addr = bindsock.accept()
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write(b'HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
return_code)
out.flush()
self.assertEqual(inc.readline(),
b'PUT /sda1/123/a/c HTTP/1.1\r\n')
headers = {}
line = inc.readline()
while line and line != b'\r\n':
headers[line.split(b':')[0].lower()] = \
line.split(b':')[1].strip()
line = inc.readline()
self.assertEqual(headers[b'x-put-timestamp'],
expected_timestamp)
except BaseException as err:
return err
return None
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal,
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 201, Timestamp(1).internal)
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(3).internal,
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 404, Timestamp(3).internal)
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(5).internal,
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 503, Timestamp(5).internal)
got_exc = False
try:
with Timeout(3):
resp = req.get_response(self.controller)
except BaseException:
got_exc = True
finally:
err = event.wait()
if err:
raise Exception(err)
        self.assertFalse(got_exc)
def test_PUT_reset_container_sync(self):
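        # Re-PUTting the same x-container-sync-to leaves the sync points
        # alone; a new sync-to target resets them to -1.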
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEqual(info['x_container_sync_point1'], -1)
self.assertEqual(info['x_container_sync_point2'], -1)
db.set_x_container_sync_points(123, 456)
info = db.get_info()
self.assertEqual(info['x_container_sync_point1'], 123)
self.assertEqual(info['x_container_sync_point2'], 456)
# Set to same value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEqual(info['x_container_sync_point1'], 123)
self.assertEqual(info['x_container_sync_point2'], 456)
# Set to new value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c2'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEqual(info['x_container_sync_point1'], -1)
self.assertEqual(info['x_container_sync_point2'], -1)
def test_POST_reset_container_sync(self):
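        # As above but via POST: an unchanged sync-to keeps the sync
        # points, a new sync-to resets them to -1.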
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEqual(info['x_container_sync_point1'], -1)
self.assertEqual(info['x_container_sync_point2'], -1)
db.set_x_container_sync_points(123, 456)
info = db.get_info()
self.assertEqual(info['x_container_sync_point1'], 123)
self.assertEqual(info['x_container_sync_point2'], 456)
# Set to same value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEqual(info['x_container_sync_point1'], 123)
self.assertEqual(info['x_container_sync_point2'], 456)
# Set to new value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c2'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEqual(info['x_container_sync_point1'], -1)
self.assertEqual(info['x_container_sync_point2'], -1)
def test_update_sync_store_on_PUT(self):
# Create a synced container and validate a link is created
self._create_synced_container_and_validate_sync_store('PUT')
# remove the sync using PUT and validate the link is deleted
self._remove_sync_and_validate_sync_store('PUT')
def test_update_sync_store_on_POST(self):
# Create a container and validate a link is not created
self._create_container_and_validate_sync_store()
# Update the container to be synced and validate a link is created
self._create_synced_container_and_validate_sync_store('POST')
# remove the sync using POST and validate the link is deleted
self._remove_sync_and_validate_sync_store('POST')
def test_update_sync_store_on_DELETE(self):
# Create a synced container and validate a link is created
self._create_synced_container_and_validate_sync_store('PUT')
# Remove the container and validate the link is deleted
self._remove_sync_and_validate_sync_store('DELETE')
def _create_container_and_validate_sync_store(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '0'})
req.get_response(self.controller)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
sync_store = self.controller.sync_store
db_path = db.db_file
db_link = sync_store._container_to_synced_container_path(db_path)
self.assertFalse(os.path.exists(db_link))
sync_containers = [c for c in sync_store.synced_containers_generator()]
self.assertFalse(sync_containers)
def _create_synced_container_and_validate_sync_store(self, method):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': method},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c',
'x-container-sync-key': '1234'})
req.get_response(self.controller)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
sync_store = self.controller.sync_store
db_path = db.db_file
db_link = sync_store._container_to_synced_container_path(db_path)
self.assertTrue(os.path.exists(db_link))
sync_containers = [c for c in sync_store.synced_containers_generator()]
self.assertEqual(1, len(sync_containers))
self.assertEqual(db_path, sync_containers[0])
def _remove_sync_and_validate_sync_store(self, method):
if method == 'DELETE':
headers = {'x-timestamp': '2'}
else:
headers = {'x-timestamp': '2',
'x-container-sync-to': '',
'x-container-sync-key': '1234'}
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
req.get_response(self.controller)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
sync_store = self.controller.sync_store
db_path = db.db_file
db_link = sync_store._container_to_synced_container_path(db_path)
self.assertFalse(os.path.exists(db_link))
sync_containers = [c for c in sync_store.synced_containers_generator()]
self.assertFalse(sync_containers)
def test_REPLICATE_rsync_then_merge_works(self):
def fake_rsync_then_merge(self, drive, db_file, args):
return HTTPNoContent()
with mock.patch("swift.container.replicator.ContainerReplicatorRpc."
"rsync_then_merge", fake_rsync_then_merge):
req = Request.blank('/sda1/p/a/',
environ={'REQUEST_METHOD': 'REPLICATE'},
headers={})
json_string = b'["rsync_then_merge", "a.db"]'
inbuf = WsgiBytesIO(json_string)
req.environ['wsgi.input'] = inbuf
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
def test_REPLICATE_complete_rsync_works(self):
def fake_complete_rsync(self, drive, db_file, args):
return HTTPNoContent()
with mock.patch("swift.container.replicator.ContainerReplicatorRpc."
"complete_rsync", fake_complete_rsync):
req = Request.blank('/sda1/p/a/',
environ={'REQUEST_METHOD': 'REPLICATE'},
headers={})
json_string = b'["complete_rsync", "a.db"]'
inbuf = WsgiBytesIO(json_string)
req.environ['wsgi.input'] = inbuf
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
def test_REPLICATE_value_error_works(self):
req = Request.blank('/sda1/p/a/',
environ={'REQUEST_METHOD': 'REPLICATE'},
headers={})
# check valuerror
wsgi_input_valuerror = b'["sync" : sync, "-1"]'
inbuf1 = WsgiBytesIO(wsgi_input_valuerror)
req.environ['wsgi.input'] = inbuf1
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_REPLICATE_unknown_sync(self):
# First without existing DB file
req = Request.blank('/sda1/p/a/',
environ={'REQUEST_METHOD': 'REPLICATE'},
headers={})
json_string = b'["unknown_sync", "a.db"]'
inbuf = WsgiBytesIO(json_string)
req.environ['wsgi.input'] = inbuf
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
mkdirs(os.path.join(self.testdir, 'sda1', 'containers', 'p', 'a', 'a'))
db_file = os.path.join(self.testdir, 'sda1',
storage_directory('containers', 'p', 'a'),
'a' + '.db')
        open(db_file, 'w').close()  # create an empty placeholder db file
req = Request.blank('/sda1/p/a/',
environ={'REQUEST_METHOD': 'REPLICATE'},
headers={})
json_string = b'["unknown_sync", "a.db"]'
inbuf = WsgiBytesIO(json_string)
req.environ['wsgi.input'] = inbuf
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 500)
def test_REPLICATE_insufficient_space(self):
conf = {'devices': self.testdir,
'mount_check': 'false',
'fallocate_reserve': '2%'}
container_controller = container_server.ContainerController(conf)
req = Request.blank(
'/sda1/p/a/',
environ={'REQUEST_METHOD': 'REPLICATE'})
statvfs_result = posix.statvfs_result([
4096, # f_bsize
4096, # f_frsize
2854907, # f_blocks
59000, # f_bfree
57000, # f_bavail (just under 2% free)
1280000, # f_files
1266040, # f_ffree,
1266040, # f_favail,
4096, # f_flag
255, # f_namemax
])
with mock.patch('os.statvfs',
return_value=statvfs_result) as mock_statvfs:
resp = req.get_response(container_controller)
self.assertEqual(resp.status_int, 507)
self.assertEqual(mock_statvfs.mock_calls,
[mock.call(os.path.join(self.testdir, 'sda1'))])
def test_UPDATE(self):
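        # UPDATE takes a JSON list of object records: bad JSON gets a 400,
        # and merged records only show up in the listing if they are not
        # deleted and match the container's storage policy.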
ts_iter = make_timestamp_iter()
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': next(ts_iter).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
ts_iter = make_timestamp_iter()
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'UPDATE'},
headers={'X-Timestamp': next(ts_iter).internal},
body='[invalid json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
ts_iter = make_timestamp_iter()
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': next(ts_iter).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
obj_ts = next(ts_iter)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'UPDATE'},
headers={'X-Timestamp': next(ts_iter).internal},
body=json.dumps([
{'name': 'some obj', 'deleted': 0,
'created_at': obj_ts.internal,
'etag': 'whatever', 'size': 1234,
'storage_policy_index': POLICIES.default.idx,
'content_type': 'foo/bar'},
{'name': 'some tombstone', 'deleted': 1,
'created_at': next(ts_iter).internal,
'etag': 'noetag', 'size': 0,
'storage_policy_index': POLICIES.default.idx,
'content_type': 'application/deleted'},
{'name': 'wrong policy', 'deleted': 0,
'created_at': next(ts_iter).internal,
'etag': 'whatever', 'size': 6789,
'storage_policy_index': 1,
'content_type': 'foo/bar'},
]))
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank(
'/sda1/p/a/c?format=json',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': next(ts_iter).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(json.loads(resp.body), [
{'name': 'some obj', 'hash': 'whatever', 'bytes': 1234,
'content_type': 'foo/bar', 'last_modified': obj_ts.isoformat},
])
def test_UPDATE_autocreate(self):
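        # An UPDATE to a missing container in an auto-create account
        # ('.a') creates the db and merges the records.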
ts_iter = make_timestamp_iter()
req = Request.blank(
'/sda1/p/.a/c',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': next(ts_iter).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
obj_ts = next(ts_iter)
req = Request.blank(
'/sda1/p/.a/c',
environ={'REQUEST_METHOD': 'UPDATE'},
headers={
'X-Timestamp': next(ts_iter).internal,
'X-Backend-Storage-Policy-Index': str(POLICIES.default.idx)},
body=json.dumps([
{'name': 'some obj', 'deleted': 0,
'created_at': obj_ts.internal,
'etag': 'whatever', 'size': 1234,
'storage_policy_index': POLICIES.default.idx,
'content_type': 'foo/bar'},
{'name': 'some tombstone', 'deleted': 1,
'created_at': next(ts_iter).internal,
'etag': 'noetag', 'size': 0,
'storage_policy_index': POLICIES.default.idx,
'content_type': 'application/deleted'},
{'name': 'wrong policy', 'deleted': 0,
'created_at': next(ts_iter).internal,
'etag': 'whatever', 'size': 6789,
'storage_policy_index': 1,
'content_type': 'foo/bar'},
]))
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202, resp.body)
req = Request.blank(
'/sda1/p/.a/c?format=json',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': next(ts_iter).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(json.loads(resp.body), [
{'name': 'some obj', 'hash': 'whatever', 'bytes': 1234,
'content_type': 'foo/bar', 'last_modified': obj_ts.isoformat},
])
def test_DELETE(self):
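        # A container holding only an empty shard range can be DELETEd;
        # afterwards listings 404, but shard ranges stay visible with
        # X-Backend-Override-Deleted: true while the db file exists.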
ts_iter = make_timestamp_iter()
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': next(ts_iter).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# PUT an *empty* shard range
sr = ShardRange('.shards_a/c', next(ts_iter), 'l', 'u', 0, 0,
state=ShardRange.ACTIVE)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': next(ts_iter).internal,
'X-Backend-Record-Type': 'shard'},
body=json.dumps([dict(sr)]))
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': next(ts_iter).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': next(ts_iter).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': next(ts_iter).internal,
'X-Backend-Record-Type': 'shard'},
params={'format': 'json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
# the override-deleted header is ignored for object records
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': next(ts_iter).internal,
'X-Backend-Override-Deleted': 'true'},
params={'format': 'json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
# but override-deleted header makes shard ranges available after DELETE
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': next(ts_iter).internal,
'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Deleted': 'true'},
params={'format': 'json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual([dict(sr, last_modified=sr.timestamp.isoformat)],
json.loads(resp.body))
self.assertIn('X-Backend-Record-Type', resp.headers)
self.assertEqual('shard', resp.headers['X-Backend-Record-Type'])
# ... unless the override header equates to False
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': next(ts_iter).internal,
'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Deleted': 'no'},
params={'format': 'json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertNotIn('X-Backend-Record-Type', resp.headers)
# ...or the db file is unlinked
broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
self.assertTrue(os.path.exists(broker.db_file))
os.unlink(broker.db_file)
self.assertFalse(os.path.exists(broker.db_file))
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'},
headers={'X-Timestamp': next(ts_iter).internal,
'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Deleted': 'true'},
params={'format': 'json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertNotIn('X-Backend-Record-Type', resp.headers)
def test_DELETE_PUT_recreate(self):
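        # verify that a deleted container can be recreated by a later PUT and
        # that put/delete/status-changed-at timestamps are reported correctly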
path = '/sda1/p/a/c'
req = Request.blank(path, method='PUT',
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(path, method='DELETE',
headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank(path, method='GET')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404) # sanity
# backend headers
expectations = {
'x-backend-put-timestamp': Timestamp(1).internal,
'x-backend-delete-timestamp': Timestamp(2).internal,
'x-backend-status-changed-at': Timestamp(2).internal,
}
for header, value in expectations.items():
self.assertEqual(resp.headers[header], value,
'response header %s was %s not %s' % (
header, resp.headers[header], value))
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
self.assertEqual(True, db.is_deleted())
info = db.get_info()
self.assertEqual(info['put_timestamp'], Timestamp('1').internal)
self.assertEqual(info['delete_timestamp'], Timestamp('2').internal)
self.assertEqual(info['status_changed_at'], Timestamp('2').internal)
# recreate
req = Request.blank(path, method='PUT',
headers={'X-Timestamp': '4'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
self.assertEqual(False, db.is_deleted())
info = db.get_info()
self.assertEqual(info['put_timestamp'], Timestamp('4').internal)
self.assertEqual(info['delete_timestamp'], Timestamp('2').internal)
self.assertEqual(info['status_changed_at'], Timestamp('4').internal)
for method in ('GET', 'HEAD'):
req = Request.blank(path)
resp = req.get_response(self.controller)
expectations = {
'x-put-timestamp': Timestamp(4).normal,
'x-backend-put-timestamp': Timestamp(4).internal,
'x-backend-delete-timestamp': Timestamp(2).internal,
'x-backend-status-changed-at': Timestamp(4).internal,
}
for header, expected in expectations.items():
self.assertEqual(resp.headers[header], expected,
'header %s was %s is not expected %s' % (
header, resp.headers[header], expected))
def test_DELETE_PUT_recreate_replication_race(self):
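        # verify that a PUT racing with replication moving a deleted db back
        # onto this node still succeeds in recreating the container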
path = '/sda1/p/a/c'
# create a deleted db
req = Request.blank(path, method='PUT',
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
req = Request.blank(path, method='DELETE',
headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank(path, method='GET')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404) # sanity
self.assertEqual(True, db.is_deleted())
# now save a copy of this db (and remove it from the "current node")
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
db_path = db._db_file
other_path = os.path.join(self.testdir, 'othernode.db')
os.rename(db_path, other_path)
# that should make it missing on this node
req = Request.blank(path, method='GET')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404) # sanity
# setup the race in os.path.exists (first time no, then yes)
mock_called = []
_real_exists = os.path.exists
def mock_exists(db_path):
rv = _real_exists(db_path)
if db_path != db._db_file:
return rv
if not mock_called:
# be as careful as we might hope backend replication can be...
with lock_parent_directory(db_path, timeout=1):
os.rename(other_path, db_path)
mock_called.append((rv, db_path))
return rv
req = Request.blank(path, method='PUT',
headers={'X-Timestamp': '4'})
with mock.patch.object(container_server.os.path, 'exists',
mock_exists):
resp = req.get_response(self.controller)
# db was successfully created
self.assertEqual(resp.status_int // 100, 2)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
self.assertEqual(False, db.is_deleted())
# mock proves the race
self.assertEqual(mock_called[:2],
[(exists, db.db_file) for exists in (False, True)])
# info was updated
info = db.get_info()
self.assertEqual(info['put_timestamp'], Timestamp('4').internal)
self.assertEqual(info['delete_timestamp'], Timestamp('2').internal)
def test_DELETE_not_found(self):
# Even if the container wasn't previously heard of, the container
# server will accept the delete and replicate it to where it belongs
# later.
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE', 'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
def test_change_storage_policy_via_DELETE_then_PUT(self):
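        # verify that the storage policy index can be changed by deleting the
        # container and then re-creating it with a different policy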
ts = (Timestamp(t).internal for t in
itertools.count(int(time.time())))
policy = random.choice(list(POLICIES))
req = Request.blank(
'/sda1/p/a/c', method='PUT',
headers={'X-Timestamp': next(ts),
'X-Backend-Storage-Policy-Index': policy.idx})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity check
# try re-recreate with other policies
other_policies = [p for p in POLICIES if p != policy]
for other_policy in other_policies:
# first delete the existing container
req = Request.blank('/sda1/p/a/c', method='DELETE', headers={
'X-Timestamp': next(ts)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204) # sanity check
# at this point, the DB should still exist but be in a deleted
# state, so changing the policy index is perfectly acceptable
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': next(ts),
'X-Backend-Storage-Policy-Index': other_policy.idx})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity check
req = Request.blank(
'/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
str(other_policy.idx))
def test_change_to_default_storage_policy_via_DELETE_then_PUT(self):
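        # verify that a recreating PUT with no policy header reverts the
        # container to the default storage policy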
ts = (Timestamp(t).internal for t in
itertools.count(int(time.time())))
non_default_policy = random.choice([p for p in POLICIES
if not p.is_default])
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': next(ts),
'X-Backend-Storage-Policy-Index': non_default_policy.idx,
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity check
req = Request.blank(
'/sda1/p/a/c', method='DELETE',
headers={'X-Timestamp': next(ts)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204) # sanity check
# at this point, the DB should still exist but be in a deleted state,
# so changing the policy index is perfectly acceptable
req = Request.blank(
'/sda1/p/a/c', method='PUT',
headers={'X-Timestamp': next(ts)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity check
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
str(POLICIES.default.idx))
def test_DELETE_object(self):
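        # verify that a container with an object cannot be deleted (409) until
        # the object itself has been deleted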
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': Timestamp(2).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o', method='PUT', headers={
'X-Timestamp': Timestamp(0).internal, 'X-Size': 1,
'X-Content-Type': 'text/plain', 'X-Etag': 'x'})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
ts = (Timestamp(t).internal for t in
itertools.count(3))
req = Request.blank('/sda1/p/a/c', method='DELETE', headers={
'X-Timestamp': next(ts)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 409)
req = Request.blank('/sda1/p/a/c/o', method='DELETE', headers={
'X-Timestamp': next(ts)})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', method='DELETE', headers={
'X-Timestamp': next(ts)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', method='GET', headers={
'X-Timestamp': next(ts)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
def test_object_update_with_offset(self):
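        # verify that object updates with offset timestamps overwrite, delete
        # and recreate listing entries as expected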
ts = (Timestamp(t).internal for t in
itertools.count(int(time.time())))
# create container
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': next(ts)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# check status
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(int(resp.headers['X-Backend-Storage-Policy-Index']),
int(POLICIES.default))
# create object
obj_timestamp = next(ts)
req = Request.blank(
'/sda1/p/a/c/o', method='PUT', headers={
'X-Timestamp': obj_timestamp, 'X-Size': 1,
'X-Content-Type': 'text/plain', 'X-Etag': 'x'})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# check listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 1)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 1)
listing_data = json.loads(resp.body)
self.assertEqual(1, len(listing_data))
for obj in listing_data:
self.assertEqual(obj['name'], 'o')
self.assertEqual(obj['bytes'], 1)
self.assertEqual(obj['hash'], 'x')
self.assertEqual(obj['content_type'], 'text/plain')
# send an update with an offset
offset_timestamp = Timestamp(obj_timestamp, offset=1).internal
req = Request.blank(
'/sda1/p/a/c/o', method='PUT', headers={
'X-Timestamp': offset_timestamp, 'X-Size': 2,
'X-Content-Type': 'text/html', 'X-Etag': 'y'})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# check updated listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 1)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 2)
listing_data = json.loads(resp.body)
self.assertEqual(1, len(listing_data))
for obj in listing_data:
self.assertEqual(obj['name'], 'o')
self.assertEqual(obj['bytes'], 2)
self.assertEqual(obj['hash'], 'y')
self.assertEqual(obj['content_type'], 'text/html')
        # now delete the object with a newer timestamp
delete_timestamp = next(ts)
req = Request.blank(
'/sda1/p/a/c/o', method='DELETE', headers={
'X-Timestamp': delete_timestamp})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
# check empty listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 0)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 0)
listing_data = json.loads(resp.body)
self.assertEqual(0, len(listing_data))
# recreate with an offset
offset_timestamp = Timestamp(delete_timestamp, offset=1).internal
req = Request.blank(
'/sda1/p/a/c/o', method='PUT', headers={
'X-Timestamp': offset_timestamp, 'X-Size': 3,
'X-Content-Type': 'text/enriched', 'X-Etag': 'z'})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# check un-deleted listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 1)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 3)
listing_data = json.loads(resp.body)
self.assertEqual(1, len(listing_data))
for obj in listing_data:
self.assertEqual(obj['name'], 'o')
self.assertEqual(obj['bytes'], 3)
self.assertEqual(obj['hash'], 'z')
self.assertEqual(obj['content_type'], 'text/enriched')
        # delete again with a further offset on the same timestamp
delete_timestamp = Timestamp(offset_timestamp, offset=1).internal
req = Request.blank(
'/sda1/p/a/c/o', method='DELETE', headers={
'X-Timestamp': delete_timestamp})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
# check empty listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 0)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 0)
listing_data = json.loads(resp.body)
self.assertEqual(0, len(listing_data))
def test_object_update_with_multiple_timestamps(self):
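        # verify that separate data, content-type and meta timestamps are
        # merged correctly into the container listing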
def do_update(t_data, etag, size, content_type,
t_type=None, t_meta=None):
"""
Make a PUT request to container controller to update an object
"""
headers = {'X-Timestamp': t_data.internal,
'X-Size': size,
'X-Content-Type': content_type,
'X-Etag': etag}
if t_type:
headers['X-Content-Type-Timestamp'] = t_type.internal
if t_meta:
headers['X-Meta-Timestamp'] = t_meta.internal
req = Request.blank(
'/sda1/p/a/c/o', method='PUT', headers=headers)
self._update_object_put_headers(req)
return req.get_response(self.controller)
ts = (Timestamp(t) for t in itertools.count(int(time.time())))
t0 = next(ts)
# create container
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': t0.internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# check status
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
# create object at t1
t1 = next(ts)
resp = do_update(t1, 'etag_at_t1', 1, 'ctype_at_t1')
self.assertEqual(resp.status_int, 201)
# check listing, expect last_modified = t1
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 1)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 1)
listing_data = json.loads(resp.body)
self.assertEqual(1, len(listing_data))
for obj in listing_data:
self.assertEqual(obj['name'], 'o')
self.assertEqual(obj['bytes'], 1)
self.assertEqual(obj['hash'], 'etag_at_t1')
self.assertEqual(obj['content_type'], 'ctype_at_t1')
self.assertEqual(obj['last_modified'], t1.isoformat)
# send an update with a content type timestamp at t4
t2 = next(ts)
t3 = next(ts)
t4 = next(ts)
resp = do_update(t1, 'etag_at_t1', 1, 'ctype_at_t4', t_type=t4)
self.assertEqual(resp.status_int, 201)
# check updated listing, expect last_modified = t4
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 1)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 1)
listing_data = json.loads(resp.body)
self.assertEqual(1, len(listing_data))
for obj in listing_data:
self.assertEqual(obj['name'], 'o')
self.assertEqual(obj['bytes'], 1)
self.assertEqual(obj['hash'], 'etag_at_t1')
self.assertEqual(obj['content_type'], 'ctype_at_t4')
self.assertEqual(obj['last_modified'], t4.isoformat)
# now overwrite with an in-between data timestamp at t2
resp = do_update(t2, 'etag_at_t2', 2, 'ctype_at_t2', t_type=t2)
self.assertEqual(resp.status_int, 201)
# check updated listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 1)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 2)
listing_data = json.loads(resp.body)
self.assertEqual(1, len(listing_data))
for obj in listing_data:
self.assertEqual(obj['name'], 'o')
self.assertEqual(obj['bytes'], 2)
self.assertEqual(obj['hash'], 'etag_at_t2')
self.assertEqual(obj['content_type'], 'ctype_at_t4')
self.assertEqual(obj['last_modified'], t4.isoformat)
# now overwrite with an in-between content-type timestamp at t3
resp = do_update(t2, 'etag_at_t2', 2, 'ctype_at_t3', t_type=t3)
self.assertEqual(resp.status_int, 201)
# check updated listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 1)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 2)
listing_data = json.loads(resp.body)
self.assertEqual(1, len(listing_data))
for obj in listing_data:
self.assertEqual(obj['name'], 'o')
self.assertEqual(obj['bytes'], 2)
self.assertEqual(obj['hash'], 'etag_at_t2')
self.assertEqual(obj['content_type'], 'ctype_at_t4')
self.assertEqual(obj['last_modified'], t4.isoformat)
# now update with an in-between meta timestamp at t5
t5 = next(ts)
resp = do_update(t2, 'etag_at_t2', 2, 'ctype_at_t3', t_type=t3,
t_meta=t5)
self.assertEqual(resp.status_int, 201)
# check updated listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 1)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 2)
listing_data = json.loads(resp.body)
self.assertEqual(1, len(listing_data))
for obj in listing_data:
self.assertEqual(obj['name'], 'o')
self.assertEqual(obj['bytes'], 2)
self.assertEqual(obj['hash'], 'etag_at_t2')
self.assertEqual(obj['content_type'], 'ctype_at_t4')
self.assertEqual(obj['last_modified'], t5.isoformat)
# delete object at t6
t6 = next(ts)
req = Request.blank(
'/sda1/p/a/c/o', method='DELETE', headers={
'X-Timestamp': t6.internal})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
# check empty listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 0)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 0)
listing_data = json.loads(resp.body)
self.assertEqual(0, len(listing_data))
# subsequent content type timestamp at t8 should leave object deleted
t7 = next(ts)
t8 = next(ts)
t9 = next(ts)
resp = do_update(t2, 'etag_at_t2', 2, 'ctype_at_t8', t_type=t8,
t_meta=t9)
self.assertEqual(resp.status_int, 201)
# check empty listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 0)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 0)
listing_data = json.loads(resp.body)
self.assertEqual(0, len(listing_data))
# object recreated at t7 should pick up existing, later content-type
resp = do_update(t7, 'etag_at_t7', 7, 'ctype_at_t7')
self.assertEqual(resp.status_int, 201)
# check listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 1)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 7)
listing_data = json.loads(resp.body)
self.assertEqual(1, len(listing_data))
for obj in listing_data:
self.assertEqual(obj['name'], 'o')
self.assertEqual(obj['bytes'], 7)
self.assertEqual(obj['hash'], 'etag_at_t7')
self.assertEqual(obj['content_type'], 'ctype_at_t8')
self.assertEqual(obj['last_modified'], t9.isoformat)
def test_DELETE_account_update(self):
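        # verify that container DELETE forwards a delete timestamp update to
        # the account server given by the X-Account-* headers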
bindsock = listen_zero()
def accept(return_code, expected_timestamp):
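            # minimal fake account server: accept one connection, return the
            # requested status and verify the forwarded delete timestamp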
if not isinstance(expected_timestamp, bytes):
expected_timestamp = expected_timestamp.encode('ascii')
try:
with Timeout(3):
sock, addr = bindsock.accept()
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write(b'HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
return_code)
out.flush()
self.assertEqual(inc.readline(),
b'PUT /sda1/123/a/c HTTP/1.1\r\n')
headers = {}
line = inc.readline()
while line and line != b'\r\n':
headers[line.split(b':')[0].lower()] = \
line.split(b':')[1].strip()
line = inc.readline()
self.assertEqual(headers[b'x-delete-timestamp'],
expected_timestamp)
except BaseException as err:
import traceback
traceback.print_exc()
return err
return None
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': Timestamp(2).internal,
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 204, Timestamp(2).internal)
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': Timestamp(2).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': Timestamp(3).internal,
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 404, Timestamp(3).internal)
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': Timestamp(4).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': Timestamp(5).internal,
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 503, Timestamp(5).internal)
got_exc = False
try:
with Timeout(3):
resp = req.get_response(self.controller)
except BaseException:
got_exc = True
finally:
err = event.wait()
if err:
raise Exception(err)
self.assertTrue(not got_exc)
def test_DELETE_invalid_partition(self):
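        # an invalid partition value in the path is rejected with 400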
req = Request.blank(
'/sda1/./a/c', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_DELETE_timestamp_not_float(self):
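        # a DELETE with an unparsable X-Timestamp is rejected with 400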
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_GET_over_limit(self):
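        # a listing limit above CONTAINER_LISTING_LIMIT is rejected with 412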
req = Request.blank(
'/sda1/p/a/c?limit=%d' %
(constraints.CONTAINER_LISTING_LIMIT + 1),
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 412)
def test_PUT_shard_range_autocreates_shard_container(self):
ts_iter = make_timestamp_iter()
shard_range = ShardRange('.shards_a/shard_c', next(ts_iter))
put_timestamp = next(ts_iter).internal
headers = {'X-Backend-Record-Type': 'shard',
'X-Timestamp': put_timestamp,
'X-Container-Sysmeta-Test': 'set',
'X-Container-Meta-Test': 'persisted'}
# PUT shard range to non-existent container without autocreate flag
req = Request.blank(
'/sda1/p/.shards_a/shard_c', method='PUT', headers=headers,
body=json.dumps([dict(shard_range)]))
resp = req.get_response(self.controller)
self.assertEqual(404, resp.status_int)
# PUT shard range to non-existent container with autocreate flag,
# missing storage policy
headers['X-Timestamp'] = next(ts_iter).internal
headers['X-Backend-Auto-Create'] = 't'
req = Request.blank(
'/sda1/p/.shards_a/shard_c', method='PUT', headers=headers,
body=json.dumps([dict(shard_range)]))
resp = req.get_response(self.controller)
self.assertEqual(400, resp.status_int)
self.assertIn(b'X-Backend-Storage-Policy-Index header is required',
resp.body)
# PUT shard range to non-existent container with autocreate flag
headers['X-Timestamp'] = next(ts_iter).internal
policy_index = random.choice(POLICIES).idx
headers['X-Backend-Storage-Policy-Index'] = str(policy_index)
req = Request.blank(
'/sda1/p/.shards_a/shard_c', method='PUT', headers=headers,
body=json.dumps([dict(shard_range)]))
resp = req.get_response(self.controller)
self.assertEqual(201, resp.status_int)
# repeat PUT of shard range to autocreated container - 202 response
headers['X-Timestamp'] = next(ts_iter).internal
headers.pop('X-Backend-Storage-Policy-Index') # no longer required
req = Request.blank(
'/sda1/p/.shards_a/shard_c', method='PUT', headers=headers,
body=json.dumps([dict(shard_range)]))
resp = req.get_response(self.controller)
self.assertEqual(202, resp.status_int)
# regular PUT to autocreated container - 202 response
headers['X-Timestamp'] = next(ts_iter).internal
req = Request.blank(
'/sda1/p/.shards_a/shard_c', method='PUT',
headers={'X-Timestamp': next(ts_iter).internal},
body=json.dumps([dict(shard_range)]))
resp = req.get_response(self.controller)
self.assertEqual(202, resp.status_int)
def test_PUT_shard_range_to_deleted_container(self):
ts_iter = make_timestamp_iter()
put_time = next(ts_iter).internal
# create a container, get it to sharded state and then delete it
req = Request.blank('/sda1/p/a/c', method='PUT',
headers={'X-Timestamp': put_time})
resp = req.get_response(self.controller)
self.assertEqual(201, resp.status_int)
broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
broker.enable_sharding(next(ts_iter))
self.assertTrue(broker.set_sharding_state())
self.assertTrue(broker.set_sharded_state())
delete_time = next(ts_iter).internal
req = Request.blank('/sda1/p/a/c', method='DELETE',
headers={'X-Timestamp': delete_time})
resp = req.get_response(self.controller)
self.assertEqual(204, resp.status_int)
self.assertTrue(broker.is_deleted())
self.assertEqual(delete_time, broker.get_info()['delete_timestamp'])
self.assertEqual(put_time, broker.get_info()['put_timestamp'])
req = Request.blank('/sda1/p/a/c', method='GET')
resp = req.get_response(self.controller)
self.assertEqual(404, resp.status_int)
# shard range PUT is accepted but container remains deleted
shard_range = ShardRange('.shards_a/shard_c', next(ts_iter),
state=ShardRange.ACTIVE)
headers = {'X-Backend-Record-Type': 'shard',
'X-Timestamp': next(ts_iter).internal,
'X-Container-Sysmeta-Test': 'set',
'X-Container-Meta-Test': 'persisted'}
req = Request.blank('/sda1/p/a/c', method='PUT', headers=headers,
body=json.dumps([dict(shard_range)]))
resp = req.get_response(self.controller)
self.assertEqual(202, resp.status_int)
self.assertTrue(broker.get_info_is_deleted()[1])
self.assertEqual(delete_time, broker.get_info()['delete_timestamp'])
self.assertEqual(put_time, broker.get_info()['put_timestamp'])
req = Request.blank('/sda1/p/a/c', method='GET')
resp = req.get_response(self.controller)
self.assertEqual(404, resp.status_int)
        # however, if the shard range has non-zero stats then the container
        # is revived
shard_range.update_meta(99, 1234, meta_timestamp=next(ts_iter))
req = Request.blank('/sda1/p/a/c', method='PUT', headers=headers,
body=json.dumps([dict(shard_range)]))
resp = req.get_response(self.controller)
self.assertEqual(202, resp.status_int)
self.assertFalse(broker.get_info_is_deleted()[1])
self.assertEqual(delete_time, broker.get_info()['delete_timestamp'])
self.assertEqual(put_time, broker.get_info()['put_timestamp'])
req = Request.blank('/sda1/p/a/c', method='GET')
resp = req.get_response(self.controller)
self.assertEqual(204, resp.status_int)
self.assertEqual('99', resp.headers['X-Container-Object-Count'])
def test_PUT_shard_range_json_in_body(self):
ts_iter = make_timestamp_iter()
oldest_ts = next(ts_iter) # used for stale shard range PUT later
shard_bounds = [('', 'ham', ShardRange.ACTIVE),
('ham', 'salami', ShardRange.ACTIVE),
('salami', '', ShardRange.CREATED)]
shard_ranges = [
ShardRange('.shards_a/_%s' % upper, next(ts_iter),
lower, upper,
i * 100, i * 1000, meta_timestamp=next(ts_iter),
state=state, state_timestamp=next(ts_iter))
for i, (lower, upper, state) in enumerate(shard_bounds)]
put_timestamp = next(ts_iter).internal
headers = {'X-Backend-Record-Type': 'shard',
'X-Timestamp': put_timestamp,
'X-Container-Sysmeta-Test': 'set',
'X-Container-Meta-Test': 'persisted'}
body = json.dumps([dict(sr) for sr in shard_ranges[:2]])
# PUT some shard ranges to non-existent container
req = Request.blank('/sda1/p/a/c', method='PUT', headers=headers,
body=body)
resp = req.get_response(self.controller)
self.assertEqual(404, resp.status_int)
# create the container with a regular PUT
req = Request.blank(
'/sda1/p/a/c', method='PUT',
headers={'X-Timestamp': put_timestamp}, body=body)
resp = req.get_response(self.controller)
self.assertEqual(201, resp.status_int)
# now we can PUT shard ranges
req = Request.blank('/sda1/p/a/c', method='PUT', headers=headers,
body=body)
resp = req.get_response(self.controller)
self.assertEqual(202, resp.status_int)
# check broker
broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
# sysmeta and user meta is updated
exp_meta = {'X-Container-Sysmeta-Test': 'set',
'X-Container-Meta-Test': 'persisted'}
self.assertEqual(
exp_meta, dict((k, v[0]) for k, v in broker.metadata.items()))
self.assertEqual(put_timestamp, broker.get_info()['put_timestamp'])
self._assert_shard_ranges_equal(shard_ranges[:2],
broker.get_shard_ranges())
# empty json dict
body = json.dumps({})
headers['X-Timestamp'] = next(ts_iter).internal
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers=headers, body=body)
resp = req.get_response(self.controller)
self.assertEqual(202, resp.status_int)
self.assertEqual(
exp_meta, dict((k, v[0]) for k, v in broker.metadata.items()))
self._assert_shard_ranges_equal(shard_ranges[:2],
broker.get_shard_ranges())
self.assertEqual(put_timestamp, broker.get_info()['put_timestamp'])
older_ts = next(ts_iter) # used for stale shard range PUT later
# updated and new shard ranges
shard_ranges[1].bytes_used += 100
shard_ranges[1].meta_timestamp = next(ts_iter)
body = json.dumps([dict(sr) for sr in shard_ranges[1:]])
headers['X-Timestamp'] = next(ts_iter).internal
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers=headers, body=body)
resp = req.get_response(self.controller)
self.assertEqual(202, resp.status_int)
self.assertEqual(
exp_meta, dict((k, v[0]) for k, v in broker.metadata.items()))
self._assert_shard_ranges_equal(shard_ranges,
broker.get_shard_ranges())
self.assertEqual(put_timestamp, broker.get_info()['put_timestamp'])
# stale shard range
stale_shard_range = shard_ranges[1].copy()
stale_shard_range.bytes_used = 0
stale_shard_range.object_count = 0
stale_shard_range.meta_timestamp = older_ts
stale_shard_range.state = ShardRange.CREATED
stale_shard_range.state_timestamp = oldest_ts
body = json.dumps([dict(stale_shard_range)])
headers['X-Timestamp'] = next(ts_iter).internal
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers=headers, body=body)
resp = req.get_response(self.controller)
self.assertEqual(202, resp.status_int)
self.assertEqual(
exp_meta, dict((k, v[0]) for k, v in broker.metadata.items()))
self._assert_shard_ranges_equal(shard_ranges,
broker.get_shard_ranges())
self.assertEqual(put_timestamp, broker.get_info()['put_timestamp'])
# deleted shard range
shard_ranges[0].deleted = 1
shard_ranges[0].timestamp = next(ts_iter)
body = json.dumps([dict(shard_ranges[0])])
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers=headers, body=body)
resp = req.get_response(self.controller)
self.assertEqual(202, resp.status_int)
self.assertEqual(
exp_meta, dict((k, v[0]) for k, v in broker.metadata.items()))
self._assert_shard_ranges_equal(
shard_ranges, broker.get_shard_ranges(include_deleted=True))
self.assertEqual(put_timestamp, broker.get_info()['put_timestamp'])
def check_bad_body(body):
bad_put_timestamp = next(ts_iter).internal
headers['X-Timestamp'] = bad_put_timestamp
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers=headers, body=body)
resp = req.get_response(self.controller)
self.assertEqual(400, resp.status_int)
self.assertIn(b'Invalid body', resp.body)
self.assertEqual(
exp_meta, dict((k, v[0]) for k, v in broker.metadata.items()))
self._assert_shard_ranges_equal(
shard_ranges, broker.get_shard_ranges(include_deleted=True))
self.assertEqual(put_timestamp, broker.get_info()['put_timestamp'])
check_bad_body('not json')
check_bad_body('')
check_bad_body('["not a shard range"]')
check_bad_body('[[]]')
bad_shard_range = dict(ShardRange('a/c', next(ts_iter)))
bad_shard_range.pop('timestamp')
check_bad_body(json.dumps([bad_shard_range]))
def check_not_shard_record_type(headers):
# body ignored
body = json.dumps([dict(sr) for sr in shard_ranges])
# note, regular PUT so put timestamp is updated
put_timestamp = next(ts_iter).internal
headers['X-Timestamp'] = put_timestamp
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers=headers, body=body)
resp = req.get_response(self.controller)
self.assertEqual(202, resp.status_int)
self._assert_shard_ranges_equal(
shard_ranges, broker.get_shard_ranges(include_deleted=True))
self.assertEqual(put_timestamp, broker.get_info()['put_timestamp'])
check_not_shard_record_type({'X-Backend-Record-Type': 'object',
'X-Timestamp': next(ts_iter).internal})
check_not_shard_record_type({'X-Timestamp': next(ts_iter).internal})
def test_PUT_GET_shard_ranges(self):
# make a container
ts_iter = make_timestamp_iter()
ts_now = Timestamp.now() # used when mocking Timestamp.now()
ts_put = next(ts_iter)
headers = {'X-Timestamp': ts_put.normal}
req = Request.blank('/sda1/p/a/c', method='PUT', headers=headers)
self.assertEqual(201, req.get_response(self.controller).status_int)
# PUT some objects
objects = [{'name': 'obj_%d' % i,
'x-timestamp': next(ts_iter).normal,
'x-content-type': 'text/plain',
'x-etag': 'etag_%d' % i,
'x-size': 1024 * i
} for i in range(2)]
for obj in objects:
req = Request.blank('/sda1/p/a/c/%s' % obj['name'], method='PUT',
headers=obj)
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(201, resp.status_int)
# PUT some shard ranges
shard_bounds = [('', 'apple', ShardRange.SHRINKING),
('apple', 'ham', ShardRange.CLEAVED),
('ham', 'salami', ShardRange.ACTIVE),
('salami', 'yoghurt', ShardRange.CREATED),
('yoghurt', '', ShardRange.FOUND),
]
shard_ranges = [
ShardRange('.sharded_a/_%s' % upper, next(ts_iter),
lower, upper,
i * 100, i * 1000, meta_timestamp=next(ts_iter),
state=state, state_timestamp=next(ts_iter))
for i, (lower, upper, state) in enumerate(shard_bounds)]
for shard_range in shard_ranges:
self._put_shard_range(shard_range)
broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
self.assertTrue(broker.is_root_container()) # sanity
self._assert_shard_ranges_equal(shard_ranges,
broker.get_shard_ranges())
# sanity check - no shard ranges when GET is only for objects
def check_object_GET(path):
req = Request.blank(path, method='GET')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'application/json')
expected = [
dict(hash=obj['x-etag'], bytes=obj['x-size'],
content_type=obj['x-content-type'],
last_modified=Timestamp(obj['x-timestamp']).isoformat,
name=obj['name']) for obj in objects]
self.assertEqual(expected, json.loads(resp.body))
self.assertIn('X-Backend-Record-Type', resp.headers)
self.assertEqual('object', resp.headers['X-Backend-Record-Type'])
check_object_GET('/sda1/p/a/c?format=json')
# GET only shard ranges
def check_shard_GET(expected_shard_ranges, path, params=''):
req = Request.blank('/sda1/p/%s?format=json%s' %
(path, params), method='GET',
headers={'X-Backend-Record-Type': 'shard'})
with mock_timestamp_now(ts_now):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'application/json')
expected = [
dict(sr, last_modified=Timestamp(sr.timestamp).isoformat)
for sr in expected_shard_ranges]
self.assertEqual(expected, json.loads(resp.body))
self.assertIn('X-Backend-Record-Type', resp.headers)
self.assertEqual('shard', resp.headers['X-Backend-Record-Type'])
def check_shard_GET_override_filter(
expected_shard_ranges, path, state, params=''):
req_headers = {'X-Backend-Record-Type': 'shard',
'X-Backend-Override-Shard-Name-Filter': state}
req = Request.blank('/sda1/p/%s?format=json%s' %
(path, params), method='GET',
headers=req_headers)
with mock_timestamp_now(ts_now):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'application/json')
expected = [
dict(sr, last_modified=Timestamp(sr.timestamp).isoformat)
for sr in expected_shard_ranges]
self.assertEqual(expected, json.loads(resp.body))
self.assertIn('X-Backend-Record-Type', resp.headers)
self.assertEqual('shard', resp.headers['X-Backend-Record-Type'])
return resp
# all shards
check_shard_GET(shard_ranges, 'a/c')
check_shard_GET(reversed(shard_ranges), 'a/c', params='&reverse=true')
# only created shards
check_shard_GET(shard_ranges[3:4], 'a/c', params='&states=created')
# only found shards
check_shard_GET(shard_ranges[4:5], 'a/c', params='&states=found')
# only cleaved shards
check_shard_GET(shard_ranges[1:2], 'a/c',
params='&states=cleaved')
# only active shards
check_shard_GET(shard_ranges[2:3], 'a/c',
params='&states=active&end_marker=pickle')
# only cleaved or active shards, reversed
check_shard_GET(
reversed(shard_ranges[1:3]), 'a/c',
params='&states=cleaved,active&reverse=true&marker=pickle')
# only shrinking shards
check_shard_GET(shard_ranges[:1], 'a/c',
params='&states=shrinking&end_marker=pickle')
check_shard_GET(shard_ranges[:1], 'a/c',
params='&states=shrinking&reverse=true&marker=pickle')
# only active or shrinking shards
check_shard_GET([shard_ranges[0], shard_ranges[2]], 'a/c',
params='&states=shrinking,active&end_marker=pickle')
check_shard_GET(
[shard_ranges[2], shard_ranges[0]], 'a/c',
params='&states=active,shrinking&reverse=true&marker=pickle')
# only active or shrinking shards using listing alias
check_shard_GET(shard_ranges[:3], 'a/c',
params='&states=listing&end_marker=pickle')
check_shard_GET(
reversed(shard_ranges[:3]), 'a/c',
params='&states=listing&reverse=true&marker=pickle')
# only created, cleaved, active, shrinking shards using updating alias
check_shard_GET(shard_ranges[1:4], 'a/c',
params='&states=updating&end_marker=treacle')
check_shard_GET(
reversed(shard_ranges[1:4]), 'a/c',
params='&states=updating&reverse=true&marker=treacle')
# listing shards don't cover entire namespace so expect an extra filler
extra_shard_range = ShardRange(
'a/c', ts_now, shard_ranges[2].upper, ShardRange.MAX, 0, 0,
state=ShardRange.ACTIVE)
expected = shard_ranges[:3] + [extra_shard_range]
check_shard_GET(expected, 'a/c', params='&states=listing')
check_shard_GET(reversed(expected), 'a/c',
params='&states=listing&reverse=true')
expected = [shard_ranges[2], extra_shard_range]
check_shard_GET(expected, 'a/c',
params='&states=listing&marker=pickle')
check_shard_GET(
reversed(expected), 'a/c',
params='&states=listing&reverse=true&end_marker=pickle')
# updating shards don't cover entire namespace so expect a filler
extra_shard_range = ShardRange(
'a/c', ts_now, shard_ranges[3].upper, ShardRange.MAX, 0, 0,
state=ShardRange.ACTIVE)
expected = shard_ranges[1:4] + [extra_shard_range]
check_shard_GET(expected, 'a/c', params='&states=updating')
check_shard_GET(reversed(expected), 'a/c',
params='&states=updating&reverse=true')
# when no listing shard ranges cover the requested namespace range then
# filler is for entire requested namespace
extra_shard_range = ShardRange(
'a/c', ts_now, 'treacle', ShardRange.MAX, 0, 0,
state=ShardRange.ACTIVE)
check_shard_GET([extra_shard_range], 'a/c',
params='&states=listing&marker=treacle')
check_shard_GET(
[extra_shard_range], 'a/c',
params='&states=listing&reverse=true&end_marker=treacle')
extra_shard_range = ShardRange(
'a/c', ts_now, 'treacle', 'walnut', 0, 0,
state=ShardRange.ACTIVE)
params = '&states=listing&marker=treacle&end_marker=walnut'
check_shard_GET([extra_shard_range], 'a/c', params=params)
        params = ('&states=listing&reverse=true&marker=walnut'
                  '&end_marker=treacle')
check_shard_GET([extra_shard_range], 'a/c', params=params)
# specific object
check_shard_GET(shard_ranges[1:2], 'a/c', params='&includes=cheese')
check_shard_GET(shard_ranges[1:2], 'a/c', params='&includes=ham')
check_shard_GET(shard_ranges[2:3], 'a/c', params='&includes=pickle')
check_shard_GET(shard_ranges[2:3], 'a/c', params='&includes=salami')
check_shard_GET(shard_ranges[3:4], 'a/c', params='&includes=walnut')
check_shard_GET(shard_ranges[3:4], 'a/c',
params='&includes=walnut&reverse=true')
# with marker
check_shard_GET(shard_ranges[1:], 'a/c', params='&marker=cheese')
check_shard_GET(reversed(shard_ranges[:2]), 'a/c',
params='&marker=cheese&reverse=true')
check_shard_GET(shard_ranges[2:], 'a/c', params='&marker=ham')
check_shard_GET(reversed(shard_ranges[:2]), 'a/c',
params='&marker=ham&reverse=true')
check_shard_GET(shard_ranges[2:], 'a/c', params='&marker=pickle')
check_shard_GET(reversed(shard_ranges[:3]), 'a/c',
params='&marker=pickle&reverse=true')
check_shard_GET(shard_ranges[3:], 'a/c', params='&marker=salami')
check_shard_GET(reversed(shard_ranges[:3]), 'a/c',
params='&marker=salami&reverse=true')
check_shard_GET(shard_ranges[3:], 'a/c', params='&marker=walnut')
check_shard_GET(reversed(shard_ranges[:4]), 'a/c',
params='&marker=walnut&reverse=true')
# with end marker
check_shard_GET(shard_ranges[:2], 'a/c', params='&end_marker=cheese')
check_shard_GET(reversed(shard_ranges[1:]), 'a/c',
params='&end_marker=cheese&reverse=true')
# everything in range 'apple' - 'ham' is <= end_marker of 'ham' so that
# range is not included because end_marker is non-inclusive
check_shard_GET(shard_ranges[:2], 'a/c', params='&end_marker=ham')
check_shard_GET(reversed(shard_ranges[2:]), 'a/c',
params='&end_marker=ham&reverse=true')
check_shard_GET(shard_ranges[:3], 'a/c', params='&end_marker=pickle')
check_shard_GET(reversed(shard_ranges[2:]), 'a/c',
params='&end_marker=pickle&reverse=true')
check_shard_GET(shard_ranges[:3], 'a/c', params='&end_marker=salami')
check_shard_GET(reversed(shard_ranges[3:]), 'a/c',
params='&end_marker=salami&reverse=true')
check_shard_GET(shard_ranges[:4], 'a/c', params='&end_marker=walnut')
check_shard_GET(reversed(shard_ranges[3:]), 'a/c',
params='&end_marker=walnut&reverse=true')
# with marker and end marker
check_shard_GET(shard_ranges[1:2], 'a/c',
params='&marker=cheese&end_marker=egg')
check_shard_GET(shard_ranges[1:2], 'a/c',
params='&end_marker=cheese&marker=egg&reverse=true')
check_shard_GET(shard_ranges[1:3], 'a/c',
params='&marker=egg&end_marker=jam')
check_shard_GET(reversed(shard_ranges[1:3]), 'a/c',
params='&end_marker=egg&marker=jam&reverse=true')
check_shard_GET(shard_ranges[1:4], 'a/c',
params='&marker=cheese&end_marker=walnut')
check_shard_GET(reversed(shard_ranges[1:4]), 'a/c',
params='&end_marker=cheese&marker=walnut&reverse=true')
check_shard_GET(shard_ranges[2:4], 'a/c',
params='&marker=jam&end_marker=walnut')
check_shard_GET(reversed(shard_ranges[2:4]), 'a/c',
params='&end_marker=jam&marker=walnut&reverse=true')
check_shard_GET(shard_ranges[3:4], 'a/c',
params='&marker=toast&end_marker=walnut')
check_shard_GET(shard_ranges[3:4], 'a/c',
params='&end_marker=toast&marker=walnut&reverse=true')
check_shard_GET([], 'a/c',
params='&marker=egg&end_marker=cheese')
check_shard_GET([], 'a/c',
params='&marker=cheese&end_marker=egg&reverse=true')
# now vary the sharding state and check the consequences of sending the
# x-backend-override-shard-name-filter header:
# in unsharded & sharding state the header should be ignored
self.assertEqual('unsharded', broker.get_db_state())
check_shard_GET(
reversed(shard_ranges[:2]), 'a/c',
params='&states=listing&reverse=true&marker=egg')
resp = check_shard_GET_override_filter(
reversed(shard_ranges[:2]), 'a/c', state='unsharded',
params='&states=listing&reverse=true&marker=egg')
self.assertNotIn('X-Backend-Override-Shard-Name-Filter', resp.headers)
resp = check_shard_GET_override_filter(
reversed(shard_ranges[:2]), 'a/c', state='sharded',
params='&states=listing&reverse=true&marker=egg')
self.assertIsNone(
resp.headers.get('X-Backend-Override-Shard-Name-Filter'))
ts_epoch = next(ts_iter)
broker.enable_sharding(ts_epoch)
self.assertTrue(broker.set_sharding_state())
check_shard_GET(
reversed(shard_ranges[:2]), 'a/c',
params='&states=listing&reverse=true&marker=egg')
resp = check_shard_GET_override_filter(
reversed(shard_ranges[:2]), 'a/c', state='sharding',
params='&states=listing&reverse=true&marker=egg')
self.assertNotIn('X-Backend-Override-Shard-Name-Filter', resp.headers)
resp = check_shard_GET_override_filter(
reversed(shard_ranges[:2]), 'a/c', state='sharded',
params='&states=listing&reverse=true&marker=egg')
self.assertIsNone(
resp.headers.get('X-Backend-Override-Shard-Name-Filter'))
# in sharded state the server *will* override the marker and reverse
# params and return listing shard ranges for entire namespace if
# X-Backend-Override-Shard-Name-Filter == 'sharded'
self.assertTrue(broker.set_sharded_state())
ts_now = next(ts_iter)
with mock_timestamp_now(ts_now):
extra_shard_range = broker.get_own_shard_range()
extra_shard_range.lower = shard_ranges[2].upper
extra_shard_range.upper = ShardRange.MAX
check_shard_GET(
reversed(shard_ranges[:2]), 'a/c',
params='&states=listing&reverse=true&marker=egg')
expected = shard_ranges[:3] + [extra_shard_range]
resp = check_shard_GET_override_filter(
reversed(shard_ranges[:2]), 'a/c', state='sharding',
params='&states=listing&reverse=true&marker=egg')
self.assertNotIn('X-Backend-Override-Shard-Name-Filter', resp.headers)
resp = check_shard_GET_override_filter(
expected, 'a/c', state='sharded',
params='&states=listing&reverse=true&marker=egg')
self.assertEqual(
'true', resp.headers.get('X-Backend-Override-Shard-Name-Filter'))
# updating state excludes the first shard which has 'shrinking' state
# but includes the fourth which has 'created' state
extra_shard_range.lower = shard_ranges[3].upper
check_shard_GET(
shard_ranges[1:2], 'a/c',
params='&states=updating&includes=egg')
expected = shard_ranges[1:4] + [extra_shard_range]
resp = check_shard_GET_override_filter(
expected, 'a/c', state='sharded',
params='&states=updating&includes=egg')
self.assertEqual(
'true', resp.headers.get('X-Backend-Override-Shard-Name-Filter'))
# delete a shard range
shard_range = shard_ranges[1]
shard_range.set_deleted(timestamp=next(ts_iter))
self._put_shard_range(shard_range)
self._assert_shard_ranges_equal(shard_ranges[:1] + shard_ranges[2:],
broker.get_shard_ranges())
check_shard_GET(shard_ranges[:1] + shard_ranges[2:], 'a/c')
check_shard_GET(shard_ranges[2:3], 'a/c', params='&includes=jam')
# specify obj, marker or end_marker not in any shard range
check_shard_GET([], 'a/c', params='&includes=cheese')
check_shard_GET([], 'a/c', params='&includes=cheese&reverse=true')
check_shard_GET([], 'a/c', params='&includes=ham')
check_shard_GET(shard_ranges[2:], 'a/c/', params='&marker=cheese')
check_shard_GET(shard_ranges[:1], 'a/c/',
params='&marker=cheese&reverse=true')
check_shard_GET(shard_ranges[:1], 'a/c/', params='&end_marker=cheese')
check_shard_GET(reversed(shard_ranges[2:]), 'a/c/',
params='&end_marker=cheese&reverse=true')
self.assertFalse(self.controller.logger.get_lines_for_level('warning'))
self.assertFalse(self.controller.logger.get_lines_for_level('error'))
def test_GET_shard_ranges_from_compacted_shard(self):
# make a shrunk shard container with two acceptors that overlap with
# the shard's namespace
shard_path = '.shards_a/c_f'
ts_iter = make_timestamp_iter()
ts_now = Timestamp.now() # used when mocking Timestamp.now()
own_shard_range = ShardRange(shard_path, next(ts_iter),
'b', 'f', 100, 1000,
meta_timestamp=next(ts_iter),
state=ShardRange.SHRUNK,
state_timestamp=next(ts_iter),
epoch=next(ts_iter))
shard_ranges = []
for lower, upper in (('a', 'd'), ('d', 'g')):
shard_ranges.append(
ShardRange('.shards_a/c_%s' % upper, next(ts_iter),
lower, upper, 100, 1000,
meta_timestamp=next(ts_iter),
state=ShardRange.ACTIVE,
state_timestamp=next(ts_iter)))
# create container
headers = {'X-Timestamp': next(ts_iter).normal}
req = Request.blank(
'/sda1/p/%s' % shard_path, method='PUT', headers=headers)
self.assertIn(
req.get_response(self.controller).status_int, (201, 202))
# PUT the acceptor shard ranges and own shard range
headers = {'X-Timestamp': next(ts_iter).normal,
'X-Container-Sysmeta-Shard-Root': 'a/c',
'X-Backend-Record-Type': 'shard'}
body = json.dumps(
[dict(sr) for sr in shard_ranges + [own_shard_range]])
req = Request.blank('/sda1/p/%s' % shard_path, method='PUT',
headers=headers, body=body)
self.assertEqual(202, req.get_response(self.controller).status_int)
def do_get(params, extra_headers, expected):
expected = [dict(sr, last_modified=sr.timestamp.isoformat)
for sr in expected]
headers = {'X-Backend-Record-Type': 'shard'}
headers.update(extra_headers)
req = Request.blank('/sda1/p/%s?format=json%s' %
(shard_path, params), method='GET',
headers=headers)
with mock_timestamp_now(ts_now):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(expected, json.loads(resp.body))
self.assertIn('X-Backend-Record-Type', resp.headers)
self.assertEqual('shard', resp.headers['X-Backend-Record-Type'])
return resp
# unsharded shard container...
do_get('', {}, shard_ranges)
do_get('&marker=e', {}, shard_ranges[1:])
do_get('&end_marker=d', {}, shard_ranges[:1])
do_get('&end_marker=k', {}, shard_ranges)
do_get('&marker=b&end_marker=f&states=listing', {}, shard_ranges)
do_get('&marker=b&end_marker=c&states=listing', {}, shard_ranges[:1])
do_get('&marker=b&end_marker=z&states=listing', {}, shard_ranges)
do_get('&states=listing', {}, shard_ranges)
# send X-Backend-Override-Shard-Name-Filter, but db is not yet sharded
# so this has no effect
extra_headers = {'X-Backend-Override-Shard-Name-Filter': 'sharded'}
resp = do_get('', extra_headers, shard_ranges)
self.assertNotIn('X-Backend-Override-Shard-Name-Filter', resp.headers)
resp = do_get('&marker=e', extra_headers, shard_ranges[1:])
self.assertNotIn('X-Backend-Override-Shard-Name-Filter', resp.headers)
resp = do_get('&end_marker=d', extra_headers, shard_ranges[:1])
self.assertNotIn('X-Backend-Override-Shard-Name-Filter', resp.headers)
resp = do_get('&states=listing', {}, shard_ranges)
self.assertNotIn('X-Backend-Override-Shard-Name-Filter', resp.headers)
# set broker to sharded state so X-Backend-Override-Shard-Name-Filter
# does have effect
shard_broker = self.controller._get_container_broker(
'sda1', 'p', '.shards_a', 'c_f')
self.assertTrue(shard_broker.set_sharding_state())
self.assertTrue(shard_broker.set_sharded_state())
resp = do_get('', extra_headers, shard_ranges)
self.assertIn('X-Backend-Override-Shard-Name-Filter', resp.headers)
self.assertTrue(resp.headers['X-Backend-Override-Shard-Name-Filter'])
resp = do_get('&marker=e', extra_headers, shard_ranges)
self.assertIn('X-Backend-Override-Shard-Name-Filter', resp.headers)
self.assertTrue(resp.headers['X-Backend-Override-Shard-Name-Filter'])
resp = do_get('&end_marker=d', extra_headers, shard_ranges)
self.assertIn('X-Backend-Override-Shard-Name-Filter', resp.headers)
self.assertTrue(resp.headers['X-Backend-Override-Shard-Name-Filter'])
def test_GET_shard_ranges_using_state_aliases(self):
# make a shard container
ts_iter = make_timestamp_iter()
shard_ranges = []
lower = ''
for state in sorted(ShardRange.STATES.keys()):
upper = str(state)
shard_ranges.append(
ShardRange('.shards_a/c_%s' % upper, next(ts_iter),
lower, upper, state * 100, state * 1000,
meta_timestamp=next(ts_iter),
state=state, state_timestamp=next(ts_iter)))
lower = upper
def do_test(root_path, path, params, expected_states):
expected = [
sr for sr in shard_ranges if sr.state in expected_states]
own_shard_range = ShardRange(path, next(ts_iter), '', '',
state=ShardRange.ACTIVE)
expected.append(own_shard_range.copy(lower=expected[-1].upper))
expected = [dict(sr, last_modified=sr.timestamp.isoformat)
for sr in expected]
headers = {'X-Timestamp': next(ts_iter).normal}
# create container
req = Request.blank(
'/sda1/p/%s' % path, method='PUT', headers=headers)
self.assertIn(
req.get_response(self.controller).status_int, (201, 202))
# PUT some shard ranges
headers = {'X-Timestamp': next(ts_iter).normal,
'X-Container-Sysmeta-Shard-Root': root_path,
'X-Backend-Record-Type': 'shard'}
body = json.dumps(
[dict(sr) for sr in shard_ranges + [own_shard_range]])
req = Request.blank(
'/sda1/p/%s' % path, method='PUT', headers=headers, body=body)
self.assertEqual(202, req.get_response(self.controller).status_int)
req = Request.blank('/sda1/p/%s?format=json%s' %
(path, params), method='GET',
headers={'X-Backend-Record-Type': 'shard'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(expected, json.loads(resp.body))
self.assertIn('X-Backend-Record-Type', resp.headers)
self.assertEqual('shard', resp.headers['X-Backend-Record-Type'])
# root's shard ranges for listing
root_path = container_path = 'a/c'
params = '&states=listing'
expected_states = [
ShardRange.CLEAVED, ShardRange.ACTIVE, ShardRange.SHARDING,
ShardRange.SHRINKING]
do_test(root_path, container_path, params, expected_states)
# shard's shard ranges for listing
container_path = '.shards_a/c'
params = '&states=listing'
do_test(root_path, container_path, params, expected_states)
# root's shard ranges for updating
params = '&states=updating'
expected_states = [
ShardRange.CREATED, ShardRange.CLEAVED, ShardRange.ACTIVE,
ShardRange.SHARDING]
container_path = root_path
do_test(root_path, container_path, params, expected_states)
# shard's shard ranges for updating
container_path = '.shards_a/c'
do_test(root_path, container_path, params, expected_states)
def test_GET_shard_ranges_include_deleted(self):
# make a shard container
ts_iter = make_timestamp_iter()
ts_now = Timestamp.now() # used when mocking Timestamp.now()
shard_ranges = []
lower = ''
for state in sorted(ShardRange.STATES.keys()):
upper = str(state)
shard_ranges.append(
ShardRange('.shards_a/c_%s' % upper, next(ts_iter),
lower, upper, state * 100, state * 1000,
meta_timestamp=next(ts_iter),
state=state, state_timestamp=next(ts_iter)))
lower = upper
# create container
headers = {'X-Timestamp': next(ts_iter).normal}
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers=headers)
self.assertIn(
req.get_response(self.controller).status_int, (201, 202))
# PUT some shard ranges
headers = {'X-Timestamp': next(ts_iter).normal,
'X-Backend-Record-Type': 'shard'}
body = json.dumps([dict(sr) for sr in shard_ranges])
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers=headers, body=body)
self.assertEqual(202, req.get_response(self.controller).status_int)
def do_test(include_deleted, expected):
expected = [dict(sr, last_modified=sr.timestamp.isoformat)
for sr in expected]
headers = {'X-Backend-Record-Type': 'shard',
'X-Backend-Include-Deleted': str(include_deleted)}
req = Request.blank('/sda1/p/a/c?format=json', method='GET',
headers=headers)
with mock_timestamp_now(ts_now):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(expected, json.loads(resp.body))
self.assertIn('X-Backend-Record-Type', resp.headers)
self.assertEqual('shard', resp.headers['X-Backend-Record-Type'])
do_test(False, shard_ranges)
do_test(True, shard_ranges)
headers = {'X-Timestamp': next(ts_iter).normal,
'X-Backend-Record-Type': 'shard'}
for sr in shard_ranges[::2]:
sr.set_deleted(timestamp=next(ts_iter))
body = json.dumps([dict(sr) for sr in shard_ranges])
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers=headers, body=body)
self.assertEqual(202, req.get_response(self.controller).status_int)
broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
self._assert_shard_ranges_equal(
shard_ranges[1::2], broker.get_shard_ranges())
do_test(False, shard_ranges[1::2])
do_test(True, shard_ranges)
headers = {'X-Timestamp': next(ts_iter).normal,
'X-Backend-Record-Type': 'shard'}
for sr in shard_ranges[1::2]:
sr.set_deleted(timestamp=next(ts_iter))
body = json.dumps([dict(sr) for sr in shard_ranges])
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers=headers, body=body)
self.assertEqual(202, req.get_response(self.controller).status_int)
self.assertFalse(broker.get_shard_ranges())
do_test(False, [])
do_test(True, shard_ranges)
def test_GET_shard_ranges_errors(self):
# verify that x-backend-record-type is not included in error responses
ts_iter = make_timestamp_iter()
ts_now = Timestamp.now() # used when mocking Timestamp.now()
shard_ranges = []
lower = ''
for state in sorted(ShardRange.STATES.keys()):
upper = str(state)
shard_ranges.append(
ShardRange('.shards_a/c_%s' % upper, next(ts_iter),
lower, upper, state * 100, state * 1000,
meta_timestamp=next(ts_iter),
state=state, state_timestamp=next(ts_iter)))
lower = upper
# create container
headers = {'X-Timestamp': next(ts_iter).normal}
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers=headers)
self.assertIn(
req.get_response(self.controller).status_int, (201, 202))
# PUT some shard ranges
headers = {'X-Timestamp': next(ts_iter).normal,
'X-Backend-Record-Type': 'shard'}
body = json.dumps([dict(sr) for sr in shard_ranges])
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers=headers, body=body)
self.assertEqual(202, req.get_response(self.controller).status_int)
def do_test(params, expected_status):
params['format'] = 'json'
headers = {'X-Backend-Record-Type': 'shard'}
req = Request.blank('/sda1/p/a/c', method='GET',
headers=headers, params=params)
with mock_timestamp_now(ts_now):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, expected_status)
self.assertEqual(resp.content_type, 'text/html')
self.assertNotIn('X-Backend-Record-Type', resp.headers)
self.assertNotIn('X-Backend-Sharding-State', resp.headers)
self.assertNotIn('X-Container-Object-Count', resp.headers)
self.assertNotIn('X-Container-Bytes-Used', resp.headers)
self.assertNotIn('X-Timestamp', resp.headers)
self.assertNotIn('X-PUT-Timestamp', resp.headers)
do_test({'states': 'bad'}, 400)
do_test({'limit': str(constraints.CONTAINER_LISTING_LIMIT + 1)}, 412)
with mock.patch('swift.container.server.check_drive',
side_effect=ValueError('sda1 is not mounted')):
do_test({}, 507)
# delete the container
req = Request.blank('/sda1/p/a/c', method='DELETE',
headers={'X-Timestamp': next(ts_iter).normal})
self.assertEqual(204, req.get_response(self.controller).status_int)
do_test({'states': 'bad'}, 404)
def test_GET_shard_ranges_auditing(self):
# verify that states=auditing causes own shard range to be included
def put_shard_ranges(shard_ranges):
headers = {'X-Timestamp': next(self.ts).normal,
'X-Backend-Record-Type': 'shard'}
body = json.dumps([dict(sr) for sr in shard_ranges])
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers=headers, body=body)
self.assertEqual(202, req.get_response(self.controller).status_int)
def do_test(ts_now, extra_params):
headers = {'X-Backend-Record-Type': 'shard',
'X-Backend-Include-Deleted': 'True'}
params = {'format': 'json'}
if extra_params:
params.update(extra_params)
req = Request.blank('/sda1/p/a/c?format=json', method='GET',
headers=headers, params=params)
with mock_timestamp_now(ts_now):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'application/json')
self.assertIn('X-Backend-Record-Type', resp.headers)
self.assertEqual('shard', resp.headers['X-Backend-Record-Type'])
return resp
# initially not all shards are shrinking and root is sharded
own_sr = ShardRange('a/c', next(self.ts), '', '',
state=ShardRange.SHARDED)
shard_bounds = [('', 'f', ShardRange.SHRUNK, True),
('f', 't', ShardRange.SHRINKING, False),
('t', '', ShardRange.ACTIVE, False)]
shard_ranges = [
ShardRange('.shards_a/_%s' % upper, next(self.ts),
lower, upper, state=state, deleted=deleted)
for (lower, upper, state, deleted) in shard_bounds]
overlap = ShardRange('.shards_a/c_bad', next(self.ts), '', 'f',
state=ShardRange.FOUND)
# create container and PUT some shard ranges
headers = {'X-Timestamp': next(self.ts).normal}
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers=headers)
self.assertIn(
req.get_response(self.controller).status_int, (201, 202))
put_shard_ranges(shard_ranges + [own_sr, overlap])
# do *not* expect own shard range in default case (no states param)
ts_now = next(self.ts)
expected = [dict(sr, last_modified=sr.timestamp.isoformat)
for sr in [overlap] + shard_ranges]
resp = do_test(ts_now, {})
self.assertEqual(expected, json.loads(resp.body))
# expect own shard range to be included when states=auditing
expected = [dict(sr, last_modified=sr.timestamp.isoformat)
for sr in shard_ranges + [own_sr]]
resp = do_test(ts_now, {'states': 'auditing'})
self.assertEqual(expected, json.loads(resp.body))
# expect own shard range to be included, marker/end_marker respected
expected = [dict(sr, last_modified=sr.timestamp.isoformat)
for sr in shard_ranges[1:2] + [own_sr]]
resp = do_test(ts_now, {'marker': 'f', 'end_marker': 't',
'states': 'auditing'})
self.assertEqual(expected, json.loads(resp.body))
# update shards to all shrinking and root to active
shard_ranges[-1].update_state(ShardRange.SHRINKING, next(self.ts))
own_sr.update_state(ShardRange.ACTIVE, next(self.ts))
put_shard_ranges(shard_ranges + [own_sr])
# do *not* expect own shard range in default case (no states param)
ts_now = next(self.ts)
expected = [dict(sr, last_modified=sr.timestamp.isoformat)
for sr in [overlap] + shard_ranges]
resp = do_test(ts_now, {})
self.assertEqual(expected, json.loads(resp.body))
# expect own shard range to be included when states=auditing
expected = [dict(sr, last_modified=sr.timestamp.isoformat)
for sr in shard_ranges[:2] + [own_sr] + shard_ranges[2:]]
resp = do_test(ts_now, {'states': 'auditing'})
self.assertEqual(expected, json.loads(resp.body))
# expect own shard range to be included, marker/end_marker respected
expected = [dict(sr, last_modified=sr.timestamp.isoformat)
for sr in shard_ranges[1:2] + [own_sr]]
resp = do_test(ts_now, {'marker': 'f', 'end_marker': 't',
'states': 'auditing'})
self.assertEqual(expected, json.loads(resp.body))
def test_GET_auto_record_type(self):
# make a container
ts_iter = make_timestamp_iter()
ts_now = Timestamp.now() # used when mocking Timestamp.now()
headers = {'X-Timestamp': next(ts_iter).normal}
req = Request.blank('/sda1/p/a/c', method='PUT', headers=headers)
self.assertEqual(201, req.get_response(self.controller).status_int)
# PUT some objects
objects = [{'name': 'obj_%d' % i,
'x-timestamp': next(ts_iter).normal,
'x-content-type': 'text/plain',
'x-etag': 'etag_%d' % i,
'x-size': 1024 * i
} for i in range(2)]
for obj in objects:
req = Request.blank('/sda1/p/a/c/%s' % obj['name'], method='PUT',
headers=obj)
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(201, resp.status_int)
# PUT some shard ranges
shard_bounds = [('', 'm', ShardRange.CLEAVED),
('m', '', ShardRange.CREATED)]
shard_ranges = [
ShardRange('.sharded_a/_%s' % upper, next(ts_iter),
lower, upper,
i * 100, i * 1000, meta_timestamp=next(ts_iter),
state=state, state_timestamp=next(ts_iter))
for i, (lower, upper, state) in enumerate(shard_bounds)]
for shard_range in shard_ranges:
self._put_shard_range(shard_range)
broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
def assert_GET_objects(req, expected_objects):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'application/json')
expected = [
dict(hash=obj['x-etag'], bytes=obj['x-size'],
content_type=obj['x-content-type'],
last_modified=Timestamp(obj['x-timestamp']).isoformat,
name=obj['name']) for obj in expected_objects]
self.assertEqual(expected, json.loads(resp.body))
self.assertIn('X-Backend-Record-Type', resp.headers)
self.assertEqual(
'object', resp.headers.pop('X-Backend-Record-Type'))
self.assertEqual(
str(POLICIES.default.idx),
resp.headers.pop('X-Backend-Storage-Policy-Index'))
self.assertEqual(
str(POLICIES.default.idx),
resp.headers.pop('X-Backend-Record-Storage-Policy-Index'))
resp.headers.pop('Content-Length')
return resp
def assert_GET_shard_ranges(req, expected_shard_ranges):
with mock_timestamp_now(ts_now):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'application/json')
expected = [
dict(sr, last_modified=Timestamp(sr.timestamp).isoformat)
for sr in expected_shard_ranges]
self.assertEqual(expected, json.loads(resp.body))
self.assertIn('X-Backend-Record-Type', resp.headers)
self.assertEqual(
'shard', resp.headers.pop('X-Backend-Record-Type'))
self.assertEqual(
str(POLICIES.default.idx),
resp.headers.pop('X-Backend-Storage-Policy-Index'))
self.assertNotIn('X-Backend-Record-Storage-Policy-Index',
resp.headers)
resp.headers.pop('Content-Length')
return resp
# unsharded
req = Request.blank('/sda1/p/a/c?format=json', method='GET',
headers={'X-Backend-Record-Type': 'auto'})
resp = assert_GET_objects(req, objects)
headers = resp.headers
req = Request.blank('/sda1/p/a/c?format=json', method='GET',
headers={'X-Backend-Record-Type': 'shard'})
resp = assert_GET_shard_ranges(req, shard_ranges)
self.assertEqual(headers, resp.headers)
req = Request.blank('/sda1/p/a/c?format=json', method='GET',
headers={'X-Backend-Record-Type': 'object'})
resp = assert_GET_objects(req, objects)
self.assertEqual(headers, resp.headers)
req = Request.blank('/sda1/p/a/c?format=json', method='GET')
resp = assert_GET_objects(req, objects)
self.assertEqual(headers, resp.headers)
# move to sharding state
broker.enable_sharding(next(ts_iter))
self.assertTrue(broker.set_sharding_state())
req = Request.blank('/sda1/p/a/c?format=json', method='GET',
headers={'X-Backend-Record-Type': 'auto'})
resp = assert_GET_shard_ranges(req, shard_ranges)
headers = resp.headers
req = Request.blank('/sda1/p/a/c?format=json', method='GET',
headers={'X-Backend-Record-Type': 'shard'})
resp = assert_GET_shard_ranges(req, shard_ranges)
self.assertEqual(headers, resp.headers)
req = Request.blank('/sda1/p/a/c?format=json', method='GET',
headers={'X-Backend-Record-Type': 'object'})
resp = assert_GET_objects(req, objects)
self.assertEqual(headers, resp.headers)
req = Request.blank('/sda1/p/a/c?format=json', method='GET')
resp = assert_GET_objects(req, objects)
self.assertEqual(headers, resp.headers)
# limit is applied to objects but not shard ranges
req = Request.blank('/sda1/p/a/c?format=json&limit=1', method='GET',
headers={'X-Backend-Record-Type': 'auto'})
resp = assert_GET_shard_ranges(req, shard_ranges)
headers = resp.headers
req = Request.blank('/sda1/p/a/c?format=json&limit=1', method='GET',
headers={'X-Backend-Record-Type': 'shard'})
resp = assert_GET_shard_ranges(req, shard_ranges)
self.assertEqual(headers, resp.headers)
req = Request.blank('/sda1/p/a/c?format=json&limit=1', method='GET',
headers={'X-Backend-Record-Type': 'object'})
resp = assert_GET_objects(req, objects[:1])
self.assertEqual(headers, resp.headers)
req = Request.blank('/sda1/p/a/c?format=json&limit=1', method='GET')
resp = assert_GET_objects(req, objects[:1])
self.assertEqual(headers, resp.headers)
# move to sharded state
self.assertTrue(broker.set_sharded_state())
req = Request.blank('/sda1/p/a/c?format=json', method='GET',
headers={'X-Backend-Record-Type': 'auto'})
resp = assert_GET_shard_ranges(req, shard_ranges)
headers = resp.headers
req = Request.blank('/sda1/p/a/c?format=json', method='GET',
headers={'X-Backend-Record-Type': 'shard'})
resp = assert_GET_shard_ranges(req, shard_ranges)
self.assertEqual(headers, resp.headers)
req = Request.blank('/sda1/p/a/c?format=json', method='GET',
headers={'X-Backend-Record-Type': 'object'})
resp = assert_GET_objects(req, [])
self.assertEqual(headers, resp.headers)
req = Request.blank('/sda1/p/a/c?format=json', method='GET')
resp = assert_GET_objects(req, [])
self.assertEqual(headers, resp.headers)
def test_PUT_GET_to_sharding_container(self):
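        # verify that, once the container moves to sharding state, object
        # updates land in the fresh db while listings are still served from
        # the retiring db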
broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
ts_iter = make_timestamp_iter()
headers = {'X-Timestamp': next(ts_iter).normal}
req = Request.blank('/sda1/p/a/c', method='PUT', headers=headers)
self.assertEqual(201, req.get_response(self.controller).status_int)
def do_update(name, timestamp=None, headers=None):
# Make a PUT request to container controller to update an object
timestamp = timestamp or next(ts_iter)
headers = headers or {}
headers.update({'X-Timestamp': timestamp.internal,
'X-Size': 17,
'X-Content-Type': 'text/plain',
'X-Etag': 'fake etag'})
req = Request.blank(
'/sda1/p/a/c/%s' % name, method='PUT', headers=headers)
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(201, resp.status_int)
def get_api_listing():
req = Request.blank(
'/sda1/p/a/c', method='GET', params={'format': 'json'})
resp = req.get_response(self.controller)
self.assertEqual(200, resp.status_int)
return [obj['name'] for obj in json.loads(resp.body)]
def assert_broker_rows(broker, expected_names, expected_max_row):
self.assertEqual(expected_max_row, broker.get_max_row())
with broker.get() as conn:
curs = conn.execute('''
SELECT * FROM object WHERE ROWID > -1 ORDER BY ROWID ASC
''')
actual = [r[1] for r in curs]
self.assertEqual(expected_names, actual)
do_update('unsharded')
self.assertEqual(['unsharded'], get_api_listing())
assert_broker_rows(broker, ['unsharded'], 1)
# move container to sharding state
broker.enable_sharding(next(ts_iter))
self.assertTrue(broker.set_sharding_state())
assert_broker_rows(broker.get_brokers()[0], ['unsharded'], 1)
assert_broker_rows(broker.get_brokers()[1], [], 1)
# add another update - should not merge into the older db and therefore
# not appear in api listing
do_update('sharding')
self.assertEqual(['unsharded'], get_api_listing())
assert_broker_rows(broker.get_brokers()[0], ['unsharded'], 1)
assert_broker_rows(broker.get_brokers()[1], ['sharding'], 2)
orig_lister = swift.container.backend.ContainerBroker.list_objects_iter
def mock_list_objects_iter(*args, **kwargs):
# cause an update to land in the pending file after it has been
# flushed by get_info() calls in the container PUT method, but
# before it is flushed by the call to list_objects_iter
do_update('racing_update')
return orig_lister(*args, **kwargs)
with mock.patch(
'swift.container.backend.ContainerBroker.list_objects_iter',
mock_list_objects_iter):
listing = get_api_listing()
self.assertEqual(['unsharded'], listing)
assert_broker_rows(broker.get_brokers()[0], ['unsharded'], 1)
assert_broker_rows(broker.get_brokers()[1], ['sharding'], 2)
# next listing will flush pending file
listing = get_api_listing()
self.assertEqual(['unsharded'], listing)
assert_broker_rows(broker.get_brokers()[0], ['unsharded'], 1)
assert_broker_rows(broker.get_brokers()[1],
['sharding', 'racing_update'], 3)
def _check_object_update_redirected_to_shard(self, method):
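        # verify that object updates are redirected (301 with Location and
        # X-Backend-Redirect-Timestamp headers) to a shard range in an
        # eligible state, and are accepted by the root container otherwise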
expected_status = 204 if method == 'DELETE' else 201
broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
ts_iter = make_timestamp_iter()
headers = {'X-Timestamp': next(ts_iter).normal}
req = Request.blank('/sda1/p/a/c', method='PUT', headers=headers)
self.assertEqual(201, req.get_response(self.controller).status_int)
def do_update(name, timestamp=None, headers=None):
            # Make a PUT or DELETE request to the container controller to
            # update an object
timestamp = timestamp or next(ts_iter)
headers = headers or {}
headers.update({'X-Timestamp': timestamp.internal,
'X-Size': 17,
'X-Content-Type': 'text/plain',
'X-Etag': 'fake etag'})
req = Request.blank(
'/sda1/p/a/c/%s' % name, method=method, headers=headers)
self._update_object_put_headers(req)
return req.get_response(self.controller)
def get_listing(broker_index):
# index -1 is always the freshest db
sub_broker = broker.get_brokers()[broker_index]
return sub_broker.get_objects()
def assert_not_redirected(obj_name, timestamp=None, headers=None):
resp = do_update(obj_name, timestamp=timestamp, headers=headers)
self.assertEqual(expected_status, resp.status_int)
self.assertNotIn('Location', resp.headers)
self.assertNotIn('X-Backend-Redirect-Timestamp', resp.headers)
def assert_redirected(obj_name, shard_range, headers=None):
resp = do_update(obj_name, headers=headers)
self.assertEqual(301, resp.status_int)
self.assertEqual('/%s/%s' % (shard_range.name, obj_name),
resp.headers['Location'])
self.assertEqual(shard_range.timestamp.internal,
resp.headers['X-Backend-Redirect-Timestamp'])
# sanity check
ts_bashful_orig = next(ts_iter)
mocked_fn = 'swift.container.backend.ContainerBroker.get_shard_ranges'
with mock.patch(mocked_fn) as mock_get_shard_ranges:
assert_not_redirected('bashful', ts_bashful_orig)
mock_get_shard_ranges.assert_not_called()
shard_ranges = {
'dopey': ShardRange(
'.sharded_a/sr_dopey', next(ts_iter), '', 'dopey'),
'happy': ShardRange(
'.sharded_a/sr_happy', next(ts_iter), 'dopey', 'happy'),
'': ShardRange('.sharded_a/sr_', next(ts_iter), 'happy', '')
}
# start with only the middle shard range
self._put_shard_range(shard_ranges['happy'])
# db not yet sharding but shard ranges exist
sr_happy = shard_ranges['happy']
redirect_states = (
ShardRange.CREATED, ShardRange.CLEAVED, ShardRange.ACTIVE,
ShardRange.SHARDING)
headers = {'X-Backend-Accept-Redirect': 'true'}
for state in ShardRange.STATES:
self.assertTrue(
sr_happy.update_state(state,
state_timestamp=next(ts_iter)))
self._put_shard_range(sr_happy)
with annotate_failure(state):
obj_name = 'grumpy%s' % state
if state in redirect_states:
assert_redirected(obj_name, sr_happy, headers=headers)
self.assertNotIn(obj_name,
[obj['name'] for obj in get_listing(-1)])
else:
assert_not_redirected(obj_name, headers=headers)
self.assertIn(obj_name,
[obj['name'] for obj in get_listing(-1)])
obj_name = 'grumpy%s_no_header' % state
with mock.patch(mocked_fn) as mock_get_shard_ranges:
assert_not_redirected(obj_name)
mock_get_shard_ranges.assert_not_called()
self.assertIn(obj_name,
[obj['name'] for obj in get_listing(-1)])
# set broker to sharding state
broker.enable_sharding(next(ts_iter))
self.assertTrue(broker.set_sharding_state())
for state in ShardRange.STATES:
self.assertTrue(
sr_happy.update_state(state,
state_timestamp=next(ts_iter)))
self._put_shard_range(sr_happy)
with annotate_failure(state):
obj_name = 'grumpier%s' % state
if state in redirect_states:
assert_redirected(obj_name, sr_happy, headers=headers)
self.assertNotIn(obj_name,
[obj['name'] for obj in get_listing(-1)])
else:
assert_not_redirected(obj_name, headers=headers)
# update goes to fresh db, misplaced
self.assertIn(
obj_name, [obj['name'] for obj in get_listing(-1)])
self.assertNotIn(
obj_name, [obj['name'] for obj in get_listing(0)])
obj_name = 'grumpier%s_no_header' % state
with mock.patch(mocked_fn) as mock_get_shard_ranges:
assert_not_redirected(obj_name)
mock_get_shard_ranges.assert_not_called()
self.assertIn(
obj_name, [obj['name'] for obj in get_listing(-1)])
# update is misplaced, not in retiring db
self.assertNotIn(
obj_name, [obj['name'] for obj in get_listing(0)])
# no shard for this object yet so it is accepted by root container
# and stored in misplaced objects...
assert_not_redirected('dopey', timestamp=next(ts_iter))
self.assertIn('dopey', [obj['name'] for obj in get_listing(-1)])
self.assertNotIn('dopey', [obj['name'] for obj in get_listing(0)])
# now PUT the first shard range
sr_dopey = shard_ranges['dopey']
sr_dopey.update_state(ShardRange.CLEAVED,
state_timestamp=next(ts_iter))
self._put_shard_range(sr_dopey)
for state in ShardRange.STATES:
self.assertTrue(
sr_happy.update_state(state,
state_timestamp=next(ts_iter)))
self._put_shard_range(sr_happy)
with annotate_failure(state):
obj_name = 'dopey%s' % state
if state in redirect_states:
assert_redirected(obj_name, sr_happy, headers=headers)
self.assertNotIn(obj_name,
[obj['name'] for obj in get_listing(-1)])
self.assertNotIn(obj_name,
[obj['name'] for obj in get_listing(0)])
else:
assert_not_redirected(obj_name, headers=headers)
self.assertIn(obj_name,
[obj['name'] for obj in get_listing(-1)])
self.assertNotIn(obj_name,
[obj['name'] for obj in get_listing(0)])
obj_name = 'dopey%s_no_header' % state
with mock.patch(mocked_fn) as mock_get_shard_ranges:
assert_not_redirected(obj_name)
mock_get_shard_ranges.assert_not_called()
self.assertIn(obj_name,
[obj['name'] for obj in get_listing(-1)])
self.assertNotIn(obj_name,
[obj['name'] for obj in get_listing(0)])
# further updates to bashful and dopey are now redirected...
assert_redirected('bashful', sr_dopey, headers=headers)
assert_redirected('dopey', sr_dopey, headers=headers)
# ...and existing updates in this container are *not* updated
self.assertEqual([ts_bashful_orig.internal],
[obj['created_at'] for obj in get_listing(0)
if obj['name'] == 'bashful'])
# set broker to sharded state
self.assertTrue(broker.set_sharded_state())
for state in ShardRange.STATES:
self.assertTrue(
sr_happy.update_state(state,
state_timestamp=next(ts_iter)))
self._put_shard_range(sr_happy)
with annotate_failure(state):
obj_name = 'grumpiest%s' % state
if state in redirect_states:
assert_redirected(obj_name, sr_happy, headers=headers)
self.assertNotIn(obj_name,
[obj['name'] for obj in get_listing(-1)])
else:
assert_not_redirected(obj_name, headers=headers)
self.assertIn(obj_name,
[obj['name'] for obj in get_listing(-1)])
obj_name = 'grumpiest%s_no_header' % state
with mock.patch(mocked_fn) as mock_get_shard_ranges:
assert_not_redirected(obj_name)
mock_get_shard_ranges.assert_not_called()
self.assertIn(obj_name,
[obj['name'] for obj in get_listing(-1)])
def test_PUT_object_update_redirected_to_shard(self):
self._check_object_update_redirected_to_shard('PUT')
def test_DELETE_object_update_redirected_to_shard(self):
self._check_object_update_redirected_to_shard('DELETE')
def test_GET_json(self):
# make a container
req = Request.blank(
'/sda1/p/a/jsonc', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# test an empty container
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(json.loads(resp.body), [])
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/jsonc/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# test format
json_body = [{"name": "0",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"},
{"name": "1",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"},
{"name": "2",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"}]
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(
resp.last_modified.strftime("%a, %d %b %Y %H:%M:%S GMT"),
time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(0)))
self.assertEqual(json.loads(resp.body), json_body)
self.assertEqual(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/json')
for accept in ('application/json', 'application/json;q=1.0,*/*;q=0.9',
'*/*;q=0.9,application/json;q=1.0', 'application/*'):
req = Request.blank(
'/sda1/p/a/jsonc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body), json_body,
'Invalid body for Accept: %s' % accept)
self.assertEqual(
resp.content_type, 'application/json',
'Invalid content_type for Accept: %s' % accept)
req = Request.blank(
'/sda1/p/a/jsonc',
environ={'REQUEST_METHOD': 'HEAD'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEqual(
resp.content_type, 'application/json',
'Invalid content_type for Accept: %s' % accept)
def test_GET_non_ascii(self):
# make a container
req = Request.blank(
'/sda1/p/a/jsonc', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
noodles = [u"Spätzle", u"ラーメン"]
for n in noodles:
req = Request.blank(
'/sda1/p/a/jsonc/%s' % bytes_to_wsgi(n.encode("utf-8")),
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity check
json_body = [{"name": noodles[0],
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"},
{"name": noodles[1],
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"}]
# JSON
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200) # sanity check
self.assertEqual(json.loads(resp.body), json_body)
# Plain text
text_body = u''.join(n + u"\n" for n in noodles).encode('utf-8')
req = Request.blank(
'/sda1/p/a/jsonc?format=text',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200) # sanity check
self.assertEqual(resp.body, text_body)
def test_GET_plain(self):
# make a container
req = Request.blank(
'/sda1/p/a/plainc', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# test an empty container
req = Request.blank(
'/sda1/p/a/plainc', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/plainc/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
plain_body = b'0\n1\n2\n'
req = Request.blank('/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'text/plain')
self.assertEqual(
resp.last_modified.strftime("%a, %d %b %Y %H:%M:%S GMT"),
time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(0)))
self.assertEqual(resp.body, plain_body)
self.assertEqual(resp.charset, 'utf-8')
req = Request.blank('/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'text/plain')
for accept in ('', 'text/plain', 'application/xml;q=0.8,*/*;q=0.9',
'*/*;q=0.9,application/xml;q=0.8', '*/*',
'text/plain,application/xml'):
req = Request.blank(
'/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEqual(
resp.body, plain_body,
'Invalid body for Accept: %s' % accept)
self.assertEqual(
resp.content_type, 'text/plain',
'Invalid content_type for Accept: %s' % accept)
req = Request.blank(
'/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEqual(
resp.content_type, 'text/plain',
'Invalid content_type for Accept: %s' % accept)
# test conflicting formats
req = Request.blank(
'/sda1/p/a/plainc?format=plain',
environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/json'
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'text/plain')
self.assertEqual(resp.body, plain_body)
# test unknown format uses default plain
req = Request.blank(
'/sda1/p/a/plainc?format=somethingelse',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'text/plain')
self.assertEqual(resp.body, plain_body)
def test_GET_json_last_modified(self):
# make a container
req = Request.blank(
'/sda1/p/a/jsonc', environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i, d in [(0, 1.5), (1, 1.0), ]:
req = Request.blank(
'/sda1/p/a/jsonc/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': d,
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# test format
        # last_modified format must be uniform, even when there are no msecs
json_body = [{"name": "0",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.500000"},
{"name": "1",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"}, ]
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(json.loads(resp.body), json_body)
self.assertEqual(resp.charset, 'utf-8')
def test_GET_xml(self):
# make a container
req = Request.blank(
'/sda1/p/a/xmlc', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/xmlc/%s' % i,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
xml_body = b'<?xml version="1.0" encoding="UTF-8"?>\n' \
b'<container name="xmlc">' \
b'<object><name>0</name><hash>x</hash><bytes>0</bytes>' \
b'<content_type>text/plain</content_type>' \
b'<last_modified>1970-01-01T00:00:01.000000' \
b'</last_modified></object>' \
b'<object><name>1</name><hash>x</hash><bytes>0</bytes>' \
b'<content_type>text/plain</content_type>' \
b'<last_modified>1970-01-01T00:00:01.000000' \
b'</last_modified></object>' \
b'<object><name>2</name><hash>x</hash><bytes>0</bytes>' \
b'<content_type>text/plain</content_type>' \
b'<last_modified>1970-01-01T00:00:01.000000' \
b'</last_modified></object>' \
b'</container>'
# tests
req = Request.blank(
'/sda1/p/a/xmlc?format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/xml')
self.assertEqual(
resp.last_modified.strftime("%a, %d %b %Y %H:%M:%S GMT"),
time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(0)))
self.assertEqual(resp.body, xml_body)
self.assertEqual(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/xmlc?format=xml',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/xml')
for xml_accept in (
'application/xml', 'application/xml;q=1.0,*/*;q=0.9',
'*/*;q=0.9,application/xml;q=1.0', 'application/xml,text/xml'):
req = Request.blank(
'/sda1/p/a/xmlc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = xml_accept
resp = req.get_response(self.controller)
self.assertEqual(
resp.body, xml_body,
'Invalid body for Accept: %s' % xml_accept)
self.assertEqual(
resp.content_type, 'application/xml',
'Invalid content_type for Accept: %s' % xml_accept)
req = Request.blank(
'/sda1/p/a/xmlc',
environ={'REQUEST_METHOD': 'HEAD'})
req.accept = xml_accept
resp = req.get_response(self.controller)
self.assertEqual(
resp.content_type, 'application/xml',
'Invalid content_type for Accept: %s' % xml_accept)
req = Request.blank(
'/sda1/p/a/xmlc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = 'text/xml'
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'text/xml')
self.assertEqual(resp.body, xml_body)
def test_GET_invalid_accept(self):
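        # a malformed Accept header is rejected with 400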
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'},
headers={'Accept': 'application/plain;q'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
self.assertEqual(resp.body, b'Invalid Accept header')
def test_GET_marker(self):
# make a container
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/c/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# test limit with marker
req = Request.blank('/sda1/p/a/c?limit=2&marker=1',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = resp.body.split(b'\n')
self.assertEqual(result, [b'2', b''])
# test limit with end_marker
req = Request.blank('/sda1/p/a/c?limit=2&end_marker=1',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = resp.body.split(b'\n')
self.assertEqual(result, [b'0', b''])
# test limit, reverse with end_marker
req = Request.blank('/sda1/p/a/c?limit=2&end_marker=1&reverse=True',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = resp.body.split(b'\n')
self.assertEqual(result, [b'2', b''])
# test marker > end_marker
req = Request.blank('/sda1/p/a/c?marker=2&end_marker=1',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = resp.body.split(b'\n')
self.assertEqual(result, [b''])
def test_weird_content_types(self):
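        # verify that unusual content types, including non-ASCII values, are
        # preserved in listings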
snowman = u'\u2603'
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i, ctype in enumerate((snowman.encode('utf-8'),
b'text/plain; charset="utf-8"')):
req = Request.blank(
'/sda1/p/a/c/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': bytes_to_wsgi(ctype),
'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
result = [x['content_type'] for x in json.loads(resp.body)]
self.assertEqual(result, [u'\u2603', 'text/plain;charset="utf-8"'])
def test_swift_bytes_in_content_type(self):
# create container
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
# regular object update
ctype = 'text/plain; charset="utf-8"'
req = Request.blank(
'/sda1/p/a/c/o1', environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': ctype,
'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 99})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# slo object update
ctype = 'text/plain; charset="utf-8"; swift_bytes=12345678'
req = Request.blank(
'/sda1/p/a/c/o2', environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': ctype,
'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 99})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# verify listing
req = Request.blank('/sda1/p/a/c?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
listing = json.loads(resp.body)
self.assertEqual(2, len(listing))
self.assertEqual('text/plain;charset="utf-8"',
listing[0]['content_type'])
self.assertEqual(99, listing[0]['bytes'])
self.assertEqual('text/plain;charset="utf-8"',
listing[1]['content_type'])
self.assertEqual(12345678, listing[1]['bytes'])
def test_GET_accept_not_valid(self):
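        # an Accept header that matches no supported listing format gets 406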
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': Timestamp(0).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c', method='GET')
req.accept = 'application/xml*'
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 406)
@patch_policies([
StoragePolicy(0, name='nulo', is_default=True),
StoragePolicy(1, name='unu'),
StoragePolicy(2, name='du'),
])
def test_GET_objects_of_different_policies(self):
# make a container
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
resp_policy_idx = resp.headers['X-Backend-Storage-Policy-Index']
self.assertEqual(resp_policy_idx, str(POLICIES.default.idx))
pol_def_objs = ['obj_default_%d' % i for i in range(11)]
pol_1_objs = ['obj_1_%d' % i for i in range(10)]
# fill the container
for obj in pol_def_objs:
req = Request.blank(
'/sda1/p/a/c/%s' % obj,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
for obj in pol_1_objs:
req = Request.blank(
'/sda1/p/a/c/%s' % obj,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0,
'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': 1})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
expected_pol_def_objs = [o.encode('utf8') for o in pol_def_objs]
expected_pol_1_objs = [o.encode('utf8') for o in pol_1_objs]
# By default the container server will return objects belonging to
        # the broker's storage policy
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
result = [o for o in resp.body.split(b'\n') if o]
self.assertEqual(len(result), 11)
self.assertEqual(sorted(result), sorted(expected_pol_def_objs))
self.assertIn('X-Backend-Storage-Policy-Index', resp.headers)
self.assertEqual('0', resp.headers['X-Backend-Storage-Policy-Index'])
self.assertEqual('0',
resp.headers['X-Backend-Record-Storage-Policy-Index'])
# If we specify the policy 0 idx we should get the same
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
req.headers['X-Backend-Storage-Policy-Index'] = POLICIES.default.idx
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
result = [o for o in resp.body.split(b'\n') if o]
self.assertEqual(len(result), 11)
self.assertEqual(sorted(result), sorted(expected_pol_def_objs))
self.assertIn('X-Backend-Storage-Policy-Index', resp.headers)
self.assertEqual('0', resp.headers['X-Backend-Storage-Policy-Index'])
self.assertEqual('0',
resp.headers['X-Backend-Record-Storage-Policy-Index'])
# And if we specify a different idx we'll get objects for that policy
        # and an X-Backend-Record-Storage-Policy-Index header letting us know
        # the policy these objects came from, if it differs from the policy
        # stored in the DB.
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
req.headers['X-Backend-Storage-Policy-Index'] = 1
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
result = [o for o in resp.body.split(b'\n') if o]
self.assertEqual(len(result), 10)
self.assertEqual(sorted(result), sorted(expected_pol_1_objs))
self.assertIn('X-Backend-Storage-Policy-Index', resp.headers)
self.assertEqual('0', resp.headers['X-Backend-Storage-Policy-Index'])
self.assertEqual('1',
resp.headers['X-Backend-Record-Storage-Policy-Index'])
# And an index that the broker doesn't have any objects for
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
req.headers['X-Backend-Storage-Policy-Index'] = 2
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
result = [o for o in resp.body.split(b'\n') if o]
self.assertEqual(len(result), 0)
self.assertFalse(result)
self.assertIn('X-Backend-Storage-Policy-Index', resp.headers)
self.assertEqual('0', resp.headers['X-Backend-Storage-Policy-Index'])
self.assertEqual('2',
resp.headers['X-Backend-Record-Storage-Policy-Index'])
# And an index that doesn't exist in POLICIES
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
req.headers['X-Backend-Storage-Policy-Index'] = 3
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_GET_limit(self):
# make a container
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# test limit
req = Request.blank(
'/sda1/p/a/c?limit=2', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = resp.body.split(b'\n')
self.assertEqual(result, [b'0', b'1', b''])
def test_GET_prefix(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('a1', 'b1', 'a2', 'b2', 'a3', 'b3'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?prefix=a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.body.split(b'\n'), [b'a1', b'a2', b'a3', b''])
def test_GET_delimiter(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('US-TX-A', 'US-TX-B', 'US-OK-A', 'US-OK-B', 'US-UT-A'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?prefix=US-&delimiter=-&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"subdir": "US-OK-"},
{"subdir": "US-TX-"},
{"subdir": "US-UT-"}])
def test_GET_multichar_delimiter(self):
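        # verify that delimiters longer than one character roll names up into
        # subdirs correctly, including with prefix and reverse params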
self.maxDiff = None
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('US~~TX~~A', 'US~~TX~~B', 'US~~OK~~A', 'US~~OK~~B',
'US~~OK~Tulsa~~A', 'US~~OK~Tulsa~~B',
'US~~UT~~A', 'US~~UT~~~B'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?prefix=US~~&delimiter=~~&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"subdir": "US~~OK~Tulsa~~"},
{"subdir": "US~~OK~~"},
{"subdir": "US~~TX~~"},
{"subdir": "US~~UT~~"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~&delimiter=~~&format=json&reverse=on',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"subdir": "US~~UT~~"},
{"subdir": "US~~TX~~"},
{"subdir": "US~~OK~~"},
{"subdir": "US~~OK~Tulsa~~"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~UT&delimiter=~~&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"subdir": "US~~UT~~"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~UT&delimiter=~~&format=json&reverse=on',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"subdir": "US~~UT~~"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~UT~&delimiter=~~&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
[{k: v for k, v in item.items() if k in ('subdir', 'name')}
for item in json.loads(resp.body)],
[{"name": "US~~UT~~A"},
{"subdir": "US~~UT~~~"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~UT~&delimiter=~~&format=json&reverse=on',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
[{k: v for k, v in item.items() if k in ('subdir', 'name')}
for item in json.loads(resp.body)],
[{"subdir": "US~~UT~~~"},
{"name": "US~~UT~~A"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~UT~~&delimiter=~~&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
[{k: v for k, v in item.items() if k in ('subdir', 'name')}
for item in json.loads(resp.body)],
[{"name": "US~~UT~~A"},
{"name": "US~~UT~~~B"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~UT~~&delimiter=~~&format=json&reverse=on',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
[{k: v for k, v in item.items() if k in ('subdir', 'name')}
for item in json.loads(resp.body)],
[{"name": "US~~UT~~~B"},
{"name": "US~~UT~~A"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~UT~~~&delimiter=~~&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
[{k: v for k, v in item.items() if k in ('subdir', 'name')}
for item in json.loads(resp.body)],
[{"name": "US~~UT~~~B"}])
def _report_objects(self, path, objects):
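        # create a container at the given path and PUT the given objects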
req = Request.blank(path, method='PUT', headers={
'x-timestamp': next(self.ts).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2, resp.body)
for obj in objects:
obj_path = path + '/%s' % obj['name']
req = Request.blank(obj_path, method='PUT', headers={
'X-Timestamp': obj['timestamp'].internal,
'X-Size': obj['bytes'],
'X-Content-Type': obj['content_type'],
'X-Etag': obj['hash'],
})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2, resp.body)
def _expected_listing(self, objects):
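        # build the expected JSON listing entries, sorted by name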
return [dict(
last_modified=o['timestamp'].isoformat, **{
k: v for k, v in o.items()
if k != 'timestamp'
}) for o in sorted(objects, key=lambda o: o['name'])]
def test_listing_with_reserved(self):
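        # reserved-namespace objects are hidden from listings unless the
        # X-Backend-Allow-Reserved-Names header is sent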
objects = [{
'name': get_reserved_name('null', 'test01'),
'bytes': 8,
'content_type': 'application/octet-stream',
'hash': '70c1db56f301c9e337b0099bd4174b28',
'timestamp': next(self.ts),
}]
path = '/sda1/p/a/%s' % get_reserved_name('null')
self._report_objects(path, objects)
req = Request.blank(path, headers={'Accept': 'application/json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200, resp.body)
self.assertEqual(json.loads(resp.body), [])
req = Request.blank(path, headers={
'X-Backend-Allow-Reserved-Names': 'true',
'Accept': 'application/json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200, resp.body)
self.assertEqual(json.loads(resp.body),
self._expected_listing(objects))
def test_delimiter_with_reserved(self):
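        # delimiter handling works with reserved names, but only when
        # X-Backend-Allow-Reserved-Names is sent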
objects = [{
'name': get_reserved_name('null', 'test01'),
'bytes': 8,
'content_type': 'application/octet-stream',
'hash': '70c1db56f301c9e337b0099bd4174b28',
'timestamp': next(self.ts),
}, {
'name': get_reserved_name('null', 'test02'),
'bytes': 8,
'content_type': 'application/octet-stream',
'hash': '70c1db56f301c9e337b0099bd4174b28',
'timestamp': next(self.ts),
}]
path = '/sda1/p/a/%s' % get_reserved_name('null')
self._report_objects(path, objects)
req = Request.blank(path + '?prefix=%s&delimiter=l' %
get_reserved_name('nul'), headers={
'Accept': 'application/json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200, resp.body)
self.assertEqual(json.loads(resp.body), [])
req = Request.blank(path + '?prefix=%s&delimiter=l' %
get_reserved_name('nul'), headers={
'X-Backend-Allow-Reserved-Names': 'true',
'Accept': 'application/json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200, resp.body)
self.assertEqual(json.loads(resp.body), [{
'subdir': '%s' % get_reserved_name('null')}])
req = Request.blank(path + '?prefix=%s&delimiter=%s' % (
get_reserved_name('nul'), get_reserved_name('')),
headers={
'X-Backend-Allow-Reserved-Names': 'true',
'Accept': 'application/json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200, resp.body)
self.assertEqual(json.loads(resp.body), [{
'subdir': '%s' % get_reserved_name('null', '')}])
def test_markers_with_reserved(self):
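        # markers in the reserved namespace only take effect when
        # X-Backend-Allow-Reserved-Names is sent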
objects = [{
'name': get_reserved_name('null', 'test01'),
'bytes': 8,
'content_type': 'application/octet-stream',
'hash': '70c1db56f301c9e337b0099bd4174b28',
'timestamp': next(self.ts),
}, {
'name': get_reserved_name('null', 'test02'),
'bytes': 10,
'content_type': 'application/octet-stream',
'hash': '912ec803b2ce49e4a541068d495ab570',
'timestamp': next(self.ts),
}]
path = '/sda1/p/a/%s' % get_reserved_name('null')
self._report_objects(path, objects)
req = Request.blank(path + '?marker=%s' %
get_reserved_name('null', ''), headers={
'Accept': 'application/json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200, resp.body)
self.assertEqual(json.loads(resp.body), [])
req = Request.blank(path + '?marker=%s' %
get_reserved_name('null', ''), headers={
'X-Backend-Allow-Reserved-Names': 'true',
'Accept': 'application/json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200, resp.body)
self.assertEqual(json.loads(resp.body),
self._expected_listing(objects))
req = Request.blank(path + '?marker=%s' %
quote(json.loads(resp.body)[0]['name']), headers={
'Accept': 'application/json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200, resp.body)
self.assertEqual(json.loads(resp.body), [])
req = Request.blank(path + '?marker=%s' %
quote(self._expected_listing(objects)[0]['name']),
headers={
'X-Backend-Allow-Reserved-Names': 'true',
'Accept': 'application/json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200, resp.body)
self.assertEqual(json.loads(resp.body),
self._expected_listing(objects)[1:])
def test_prefix_with_reserved(self):
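        # a prefix in the reserved namespace only matches when
        # X-Backend-Allow-Reserved-Names is sent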
objects = [{
'name': get_reserved_name('null', 'test01'),
'bytes': 8,
'content_type': 'application/octet-stream',
'hash': '70c1db56f301c9e337b0099bd4174b28',
'timestamp': next(self.ts),
}, {
'name': get_reserved_name('null', 'test02'),
'bytes': 10,
'content_type': 'application/octet-stream',
'hash': '912ec803b2ce49e4a541068d495ab570',
'timestamp': next(self.ts),
}, {
'name': get_reserved_name('null', 'foo'),
'bytes': 12,
'content_type': 'application/octet-stream',
'hash': 'acbd18db4cc2f85cedef654fccc4a4d8',
'timestamp': next(self.ts),
}, {
'name': get_reserved_name('nullish'),
'bytes': 13,
'content_type': 'application/octet-stream',
'hash': '37b51d194a7513e45b56f6524f2d51f2',
'timestamp': next(self.ts),
}]
path = '/sda1/p/a/%s' % get_reserved_name('null')
self._report_objects(path, objects)
req = Request.blank(path + '?prefix=%s' %
get_reserved_name('null', 'test'), headers={
'Accept': 'application/json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200, resp.body)
self.assertEqual(json.loads(resp.body), [])
req = Request.blank(path + '?prefix=%s' %
get_reserved_name('null', 'test'), headers={
'X-Backend-Allow-Reserved-Names': 'true',
'Accept': 'application/json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200, resp.body)
self.assertEqual(json.loads(resp.body),
self._expected_listing(objects[:2]))
def test_prefix_and_delim_with_reserved(self):
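        # prefix and delimiter combine correctly in the reserved namespace
        # when X-Backend-Allow-Reserved-Names is sent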
objects = [{
'name': get_reserved_name('null', 'test01'),
'bytes': 8,
'content_type': 'application/octet-stream',
'hash': '70c1db56f301c9e337b0099bd4174b28',
'timestamp': next(self.ts),
}, {
'name': get_reserved_name('null', 'test02'),
'bytes': 10,
'content_type': 'application/octet-stream',
'hash': '912ec803b2ce49e4a541068d495ab570',
'timestamp': next(self.ts),
}, {
'name': get_reserved_name('null', 'foo'),
'bytes': 12,
'content_type': 'application/octet-stream',
'hash': 'acbd18db4cc2f85cedef654fccc4a4d8',
'timestamp': next(self.ts),
}, {
'name': get_reserved_name('nullish'),
'bytes': 13,
'content_type': 'application/octet-stream',
'hash': '37b51d194a7513e45b56f6524f2d51f2',
'timestamp': next(self.ts),
}]
path = '/sda1/p/a/%s' % get_reserved_name('null')
self._report_objects(path, objects)
req = Request.blank(path + '?prefix=%s&delimiter=%s' % (
get_reserved_name('null'), get_reserved_name()), headers={
'Accept': 'application/json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200, resp.body)
self.assertEqual(json.loads(resp.body), [])
req = Request.blank(path + '?prefix=%s&delimiter=%s' % (
get_reserved_name('null'), get_reserved_name()), headers={
'X-Backend-Allow-Reserved-Names': 'true',
'Accept': 'application/json'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200, resp.body)
expected = [{'subdir': get_reserved_name('null', '')}] + \
self._expected_listing(objects)[-1:]
self.assertEqual(json.loads(resp.body), expected)
def test_GET_delimiter_non_ascii(self):
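        # delimiter listings handle non-ASCII object names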
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for obj_name in [u"a/❥/1", u"a/❥/2", u"a/ꙮ/1", u"a/ꙮ/2"]:
req = Request.blank(
'/sda1/p/a/c/%s' % bytes_to_wsgi(obj_name.encode('utf-8')),
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# JSON
req = Request.blank(
'/sda1/p/a/c?prefix=a/&delimiter=/&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"subdir": u"a/❥/"},
{"subdir": u"a/ꙮ/"}])
# Plain text
req = Request.blank(
'/sda1/p/a/c?prefix=a/&delimiter=/&format=text',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.body, u"a/❥/\na/ꙮ/\n".encode("utf-8"))
def test_GET_leading_delimiter(self):
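        # names starting with the delimiter roll up into a leading subdir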
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('US-TX-A', 'US-TX-B', '-UK', '-CH'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?delimiter=-&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"subdir": "-"},
{"subdir": "US-"}])
def test_GET_delimiter_xml(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('US-TX-A', 'US-TX-B', 'US-OK-A', 'US-OK-B', 'US-UT-A'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?prefix=US-&delimiter=-&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
resp.body, b'<?xml version="1.0" encoding="UTF-8"?>'
b'\n<container name="c"><subdir name="US-OK-">'
b'<name>US-OK-</name></subdir>'
b'<subdir name="US-TX-"><name>US-TX-</name></subdir>'
b'<subdir name="US-UT-"><name>US-UT-</name></subdir></container>')
def test_GET_delimiter_xml_with_quotes(self):
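        # subdir names containing quotes and angle brackets survive XML
        # listings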
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/c/<\'sub\' "dir">/object',
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?delimiter=/&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
dom = minidom.parseString(resp.body)
self.assertTrue(len(dom.getElementsByTagName('container')) == 1)
container = dom.getElementsByTagName('container')[0]
self.assertTrue(len(container.getElementsByTagName('subdir')) == 1)
subdir = container.getElementsByTagName('subdir')[0]
self.assertEqual(six.text_type(subdir.attributes['name'].value),
u'<\'sub\' "dir">/')
self.assertTrue(len(subdir.getElementsByTagName('name')) == 1)
name = subdir.getElementsByTagName('name')[0]
self.assertEqual(six.text_type(name.childNodes[0].data),
u'<\'sub\' "dir">/')
def test_GET_path(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('US/TX', 'US/TX/B', 'US/OK', 'US/OK/B', 'US/UT/A'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?path=US&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"name": "US/OK", "hash": "x", "bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"},
{"name": "US/TX", "hash": "x", "bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"}])
def test_through_call(self):
inbuf = BytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(status, headers):
outbuf.writelines(status)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '404 ')
def test_through_call_invalid_path(self):
inbuf = BytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(status, headers):
outbuf.writelines(status)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/bob',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '400 ')
def test_through_call_invalid_path_utf8(self):
inbuf = BytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(status, headers):
outbuf.writelines(status)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c\xd8\x3e%20/%',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '412 ')
def test_invalid_method_doesnt_exist(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(status, headers):
outbuf.writelines(status)
self.controller.__call__({'REQUEST_METHOD': 'method_doesnt_exist',
'PATH_INFO': '/sda1/p/a/c'},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test_invalid_method_is_not_public(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(status, headers):
outbuf.writelines(status)
self.controller.__call__({'REQUEST_METHOD': '__init__',
'PATH_INFO': '/sda1/p/a/c'},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test_params_format(self):
req = Request.blank(
'/sda1/p/a/c', method='PUT',
headers={'X-Timestamp': Timestamp(1).internal})
req.get_response(self.controller)
for format in ('xml', 'json'):
req = Request.blank('/sda1/p/a/c?format=%s' % format,
method='GET')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
def test_params_utf8(self):
# Bad UTF8 sequence, all parameters should cause 400 error
for param in ('delimiter', 'limit', 'marker', 'path', 'prefix',
'end_marker', 'format'):
req = Request.blank('/sda1/p/a/c?%s=\xce' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400,
"%d on param %s" % (resp.status_int, param))
req = Request.blank('/sda1/p/a/c', method='PUT',
headers={'X-Timestamp': Timestamp(1).internal})
req.get_response(self.controller)
# Good UTF8 sequence, ignored for limit, doesn't affect other queries
for param in ('limit', 'marker', 'path', 'prefix', 'end_marker',
'format', 'delimiter'):
req = Request.blank('/sda1/p/a/c?%s=\xce\xa9' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204,
"%d on param %s" % (resp.status_int, param))
def test_put_auto_create(self):
def do_test(expected_status, path, extra_headers=None, body=None):
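            # helper: PUT the given path with standard object headers (plus
            # any extras) and assert the expected response status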
headers = {'x-timestamp': Timestamp(1).internal,
'x-size': '0',
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e'}
if extra_headers:
headers.update(extra_headers)
req = Request.blank('/sda1/p/' + path,
environ={'REQUEST_METHOD': 'PUT'},
headers=headers, body=body)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, expected_status)
do_test(404, 'a/c/o')
do_test(404, '.a/c/o', {'X-Backend-Auto-Create': 'no'})
do_test(201, '.a/c/o')
do_test(404, 'a/.c/o')
do_test(404, 'a/c/.o')
do_test(201, 'a/c/o', {'X-Backend-Auto-Create': 'yes'})
do_test(404, '.shards_a/c/o')
create_shard_headers = {
'X-Backend-Record-Type': 'shard',
'X-Backend-Storage-Policy-Index': '0'}
do_test(404, '.shards_a/c', create_shard_headers, '[]')
create_shard_headers['X-Backend-Auto-Create'] = 't'
do_test(201, '.shards_a/c', create_shard_headers, '[]')
def test_delete_auto_create(self):
def do_test(expected_status, path, extra_headers=None):
headers = {'x-timestamp': Timestamp(1).internal}
if extra_headers:
headers.update(extra_headers)
req = Request.blank('/sda1/p/' + path,
environ={'REQUEST_METHOD': 'DELETE'},
headers=headers)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, expected_status)
do_test(404, 'a/c/o')
do_test(404, '.a/c/o', {'X-Backend-Auto-Create': 'false'})
do_test(204, '.a/c/o')
do_test(404, 'a/.c/o')
do_test(404, 'a/.c/.o')
do_test(404, '.shards_a/c/o')
do_test(204, 'a/c/o', {'X-Backend-Auto-Create': 'true'})
do_test(204, '.shards_a/c/o', {'X-Backend-Auto-Create': 'true'})
def test_content_type_on_HEAD(self):
Request.blank('/sda1/p/a/o',
headers={'X-Timestamp': Timestamp(1).internal},
environ={'REQUEST_METHOD': 'PUT'}).get_response(
self.controller)
env = {'REQUEST_METHOD': 'HEAD'}
req = Request.blank('/sda1/p/a/o?format=xml', environ=env)
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/xml')
self.assertEqual(resp.charset, 'utf-8')
req = Request.blank('/sda1/p/a/o?format=json', environ=env)
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(resp.charset, 'utf-8')
req = Request.blank('/sda1/p/a/o', environ=env)
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'text/plain')
self.assertEqual(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/o', headers={'Accept': 'application/json'}, environ=env)
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/o', headers={'Accept': 'application/xml'}, environ=env)
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/xml')
self.assertEqual(resp.charset, 'utf-8')
def test_updating_multiple_container_servers(self):
http_connect_args = []
def fake_http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
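            # capture the connection arguments and return an object that
            # fakes a successful (200) account update response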
class SuccessfulFakeConn(object):
@property
def status(self):
return 200
def getresponse(self):
return self
def read(self):
return ''
captured_args = {'ipaddr': ipaddr, 'port': port,
'device': device, 'partition': partition,
'method': method, 'path': path, 'ssl': ssl,
'headers': headers, 'query_string': query_string}
http_connect_args.append(
dict((k, v) for k, v in captured_args.items()
if v is not None))
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '12345',
'X-Account-Partition': '30',
'X-Account-Host': '1.2.3.4:5, 6.7.8.9:10',
'X-Account-Device': 'sdb1, sdf1'})
orig_http_connect = container_server.http_connect
try:
container_server.http_connect = fake_http_connect
req.get_response(self.controller)
finally:
container_server.http_connect = orig_http_connect
http_connect_args.sort(key=operator.itemgetter('ipaddr'))
self.assertEqual(len(http_connect_args), 2)
self.assertEqual(
http_connect_args[0],
{'ipaddr': '1.2.3.4',
'port': '5',
'path': '/a/c',
'device': 'sdb1',
'partition': '30',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-bytes-used': 0,
'x-delete-timestamp': '0',
'x-object-count': 0,
'x-put-timestamp': Timestamp(12345).internal,
'X-Backend-Storage-Policy-Index': '%s' % POLICIES.default.idx,
'referer': 'PUT http://localhost/sda1/p/a/c',
'user-agent': 'container-server %d' % os.getpid(),
'x-trans-id': '-'})})
self.assertEqual(
http_connect_args[1],
{'ipaddr': '6.7.8.9',
'port': '10',
'path': '/a/c',
'device': 'sdf1',
'partition': '30',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-bytes-used': 0,
'x-delete-timestamp': '0',
'x-object-count': 0,
'x-put-timestamp': Timestamp(12345).internal,
'X-Backend-Storage-Policy-Index': '%s' % POLICIES.default.idx,
'referer': 'PUT http://localhost/sda1/p/a/c',
'user-agent': 'container-server %d' % os.getpid(),
'x-trans-id': '-'})})
def test_serv_reserv(self):
        # Test that the replication_server flag is set from the
        # configuration file.
container_controller = container_server.ContainerController
conf = {'devices': self.testdir, 'mount_check': 'false'}
self.assertTrue(container_controller(conf).replication_server)
for val in [True, '1', 'True', 'true']:
conf['replication_server'] = val
self.assertTrue(container_controller(conf).replication_server)
for val in [False, 0, '0', 'False', 'false', 'test_string']:
conf['replication_server'] = val
self.assertFalse(container_controller(conf).replication_server)
def test_list_allowed_methods(self):
# Test list of allowed_methods
obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST']
repl_methods = ['REPLICATE']
for method_name in obj_methods:
method = getattr(self.controller, method_name)
self.assertFalse(hasattr(method, 'replication'))
for method_name in repl_methods:
method = getattr(self.controller, method_name)
self.assertEqual(method.replication, True)
def test_correct_allowed_method(self):
        # Test that an allowed method is dispatched correctly via
# swift.container.server.ContainerController.__call__
inbuf = BytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.controller = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'})
def start_response(status, headers):
"""Sends args to outbuf"""
outbuf.writelines(status)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
method_res = mock.MagicMock()
mock_method = public(lambda x: mock.MagicMock(return_value=method_res))
with mock.patch.object(self.controller, method, new=mock_method):
response = self.controller(env, start_response)
self.assertEqual(response, method_res)
# The controller passed responsibility of calling start_response
# to the mock, which never did
self.assertEqual(outbuf.getvalue(), '')
def test_not_allowed_method(self):
        # Test that a not-allowed method is rejected when dispatched via
# swift.container.server.ContainerController.__call__
inbuf = BytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.controller = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'})
def start_response(status, headers):
"""Sends args to outbuf"""
outbuf.writelines(status)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
answer = [b'<html><h1>Method Not Allowed</h1><p>The method is not '
b'allowed for this resource.</p></html>']
mock_method = replication(public(lambda x: mock.MagicMock()))
with mock.patch.object(self.controller, method, new=mock_method):
response = self.controller.__call__(env, start_response)
self.assertEqual(response, answer)
self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test_replication_server_call_all_methods(self):
inbuf = BytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.controller = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'true'})
def start_response(status, headers):
"""Sends args to outbuf"""
outbuf.writelines(status)
obj_methods = ['PUT', 'HEAD', 'GET', 'POST', 'DELETE', 'OPTIONS']
for method in obj_methods:
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'HTTP_X_TIMESTAMP': next(self.ts).internal,
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
self.controller(env, start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertIn(outbuf.getvalue()[:4], ('200 ', '201 ', '204 '))
def test__call__raise_timeout(self):
inbuf = WsgiBytesIO()
errbuf = StringIO()
outbuf = StringIO()
self.logger = debug_logger('test')
self.container_controller = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false', 'log_requests': 'false'},
logger=self.logger)
def start_response(status, headers):
# Sends args to outbuf
outbuf.writelines(status)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
@public
def mock_put_method(*args, **kwargs):
raise Exception()
with mock.patch.object(self.container_controller, method,
new=mock_put_method):
response = self.container_controller.__call__(env, start_response)
self.assertTrue(response[0].startswith(
b'Traceback (most recent call last):'))
self.assertEqual(self.logger.get_lines_for_level('error'), [
'ERROR __call__ error with %(method)s %(path)s : ' % {
'method': 'PUT', 'path': '/sda1/p/a/c'},
])
self.assertEqual(self.logger.get_lines_for_level('info'), [])
self.assertEqual(outbuf.getvalue()[:4], '500 ')
def test_GET_log_requests_true(self):
self.controller.log_requests = True
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue(self.controller.logger.log_dict['info'])
def test_GET_log_requests_false(self):
self.controller.log_requests = False
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertFalse(self.controller.logger.log_dict['info'])
def test_log_line_format(self):
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
with mock.patch('time.time',
mock.MagicMock(side_effect=[10000.0, 10001.0, 10002.0,
10002.0, 10002.0])), \
mock.patch('os.getpid', mock.MagicMock(return_value=1234)):
req.get_response(self.controller)
info_lines = self.controller.logger.get_lines_for_level('info')
self.assertEqual(info_lines, [
'1.2.3.4 - - [01/Jan/1970:02:46:42 +0000] "HEAD /sda1/p/a/c" '
'404 - "-" "-" "-" 2.0000 "-" 1234 0',
])
@patch_policies([
StoragePolicy(0, 'legacy'),
StoragePolicy(1, 'one'),
StoragePolicy(2, 'two', True),
StoragePolicy(3, 'three'),
StoragePolicy(4, 'four'),
])
class TestNonLegacyDefaultStoragePolicy(TestContainerController):
"""
Test swift.container.server.ContainerController with a non-legacy default
Storage Policy.
"""
def _update_object_put_headers(self, req):
"""
Add policy index headers for containers created with default policy
- which in this TestCase is 2.
"""
req.headers['X-Backend-Storage-Policy-Index'] = \
str(POLICIES.default.idx)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/container/test_server.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import shutil
import itertools
import unittest
import mock
import random
import sqlite3
from eventlet import sleep
from swift.common import db_replicator
from swift.common.swob import HTTPServerError
from swift.container import replicator, backend, server, sync_store
from swift.container.reconciler import (
MISPLACED_OBJECTS_ACCOUNT, get_reconciler_container_name)
from swift.common.utils import Timestamp, encode_timestamps, ShardRange, \
get_db_files, make_db_file_path
from swift.common.storage_policy import POLICIES
from test.debug_logger import debug_logger
from test.unit.common import test_db_replicator
from test.unit import patch_policies, make_timestamp_iter, mock_check_drive, \
EMPTY_ETAG, attach_fake_replication_rpc, FakeHTTPResponse
from contextlib import contextmanager
@patch_policies
class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
backend = backend.ContainerBroker
datadir = server.DATADIR
replicator_daemon = replicator.ContainerReplicator
replicator_rpc = replicator.ContainerReplicatorRpc
def assertShardRangesEqual(self, x, y):
# ShardRange.__eq__ only compares lower and upper; here we generate
# dict representations to compare all attributes
self.assertEqual([dict(sr) for sr in x], [dict(sr) for sr in y])
def assertShardRangesNotEqual(self, x, y):
# ShardRange.__eq__ only compares lower and upper; here we generate
# dict representations to compare all attributes
self.assertNotEqual([dict(sr) for sr in x], [dict(sr) for sr in y])
def test_report_up_to_date(self):
broker = self._get_broker('a', 'c', node_index=0)
broker.initialize(Timestamp(1).internal, int(POLICIES.default))
info = broker.get_info()
broker.reported(info['put_timestamp'],
info['delete_timestamp'],
info['object_count'],
info['bytes_used'])
full_info = broker.get_replication_info()
expected_info = {'put_timestamp': Timestamp(1).internal,
'delete_timestamp': '0',
'count': 0,
'bytes_used': 0,
'reported_put_timestamp': Timestamp(1).internal,
'reported_delete_timestamp': '0',
'reported_object_count': 0,
'reported_bytes_used': 0}
for key, value in expected_info.items():
msg = 'expected value for %r, %r != %r' % (
key, full_info[key], value)
self.assertEqual(full_info[key], value, msg)
repl = replicator.ContainerReplicator({})
self.assertTrue(repl.report_up_to_date(full_info))
full_info['delete_timestamp'] = Timestamp(2).internal
self.assertFalse(repl.report_up_to_date(full_info))
full_info['reported_delete_timestamp'] = Timestamp(2).internal
self.assertTrue(repl.report_up_to_date(full_info))
full_info['count'] = 1
self.assertFalse(repl.report_up_to_date(full_info))
full_info['reported_object_count'] = 1
self.assertTrue(repl.report_up_to_date(full_info))
full_info['bytes_used'] = 1
self.assertFalse(repl.report_up_to_date(full_info))
full_info['reported_bytes_used'] = 1
self.assertTrue(repl.report_up_to_date(full_info))
full_info['put_timestamp'] = Timestamp(3).internal
self.assertFalse(repl.report_up_to_date(full_info))
full_info['reported_put_timestamp'] = Timestamp(3).internal
self.assertTrue(repl.report_up_to_date(full_info))
def test_sync_remote_in_sync(self):
# setup a local container
broker = self._get_broker('a', 'c', node_index=0)
put_timestamp = time.time()
broker.initialize(put_timestamp, POLICIES.default.idx)
# "replicate" to same database
node = {'device': 'sdb', 'replication_ip': '127.0.0.1'}
daemon = replicator.ContainerReplicator({})
# replicate
part, node = self._get_broker_part_node(broker)
info = broker.get_replication_info()
success = daemon._repl_to_node(node, broker, part, info)
# nothing to do
self.assertTrue(success)
self.assertEqual(1, daemon.stats['no_change'])
def test_sync_remote_with_timings(self):
ts_iter = make_timestamp_iter()
# setup a local container
broker = self._get_broker('a', 'c', node_index=0)
put_timestamp = next(ts_iter)
broker.initialize(put_timestamp.internal, POLICIES.default.idx)
broker.update_metadata(
{'x-container-meta-test': ('foo', put_timestamp.internal)})
# setup remote container
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_broker.initialize(next(ts_iter).internal, POLICIES.default.idx)
timestamp = next(ts_iter)
for db in (broker, remote_broker):
db.put_object(
'/a/c/o', timestamp.internal, 0, 'content-type', 'etag',
storage_policy_index=db.storage_policy_index)
# replicate
daemon = replicator.ContainerReplicator({})
part, node = self._get_broker_part_node(remote_broker)
info = broker.get_replication_info()
with mock.patch.object(db_replicator, 'DEBUG_TIMINGS_THRESHOLD', -1):
success = daemon._repl_to_node(node, broker, part, info)
# nothing to do
self.assertTrue(success)
self.assertEqual(1, daemon.stats['no_change'])
expected_timings = ('info', 'update_metadata', 'merge_timestamps',
'get_sync', 'merge_syncs')
debug_lines = self.rpc.logger.logger.get_lines_for_level('debug')
self.assertEqual(len(expected_timings), len(debug_lines),
'Expected %s debug lines but only got %s: %s' %
(len(expected_timings), len(debug_lines),
debug_lines))
for metric in expected_timings:
expected = 'replicator-rpc-sync time for %s:' % metric
self.assertTrue(any(expected in line for line in debug_lines),
'debug timing %r was not in %r' % (
expected, debug_lines))
def test_sync_remote_missing(self):
broker = self._get_broker('a', 'c', node_index=0)
put_timestamp = time.time()
broker.initialize(put_timestamp, POLICIES.default.idx)
# "replicate"
part, node = self._get_broker_part_node(broker)
daemon = self._run_once(node)
# complete rsync to all other nodes
self.assertEqual(2, daemon.stats['rsync'])
for i in range(1, 3):
remote_broker = self._get_broker('a', 'c', node_index=i)
self.assertTrue(os.path.exists(remote_broker.db_file))
remote_info = remote_broker.get_info()
local_info = self._get_broker(
'a', 'c', node_index=0).get_info()
for k, v in local_info.items():
if k == 'id':
continue
self.assertEqual(remote_info[k], v,
"mismatch remote %s %r != %r" % (
k, remote_info[k], v))
def test_rsync_failure(self):
broker = self._get_broker('a', 'c', node_index=0)
put_timestamp = time.time()
broker.initialize(put_timestamp, POLICIES.default.idx)
# "replicate" to different device
daemon = replicator.ContainerReplicator({})
def _rsync_file(*args, **kwargs):
return False
daemon._rsync_file = _rsync_file
# replicate
part, local_node = self._get_broker_part_node(broker)
node = random.choice([n for n in self._ring.devs
if n['id'] != local_node['id']])
info = broker.get_replication_info()
with mock_check_drive(ismount=True):
success = daemon._repl_to_node(node, broker, part, info)
self.assertFalse(success)
def test_sync_remote_missing_most_rows(self):
put_timestamp = time.time()
# create "local" broker
broker = self._get_broker('a', 'c', node_index=0)
broker.initialize(put_timestamp, POLICIES.default.idx)
# create "remote" broker
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_broker.initialize(put_timestamp, POLICIES.default.idx)
# add a row to "local" db
broker.put_object('/a/c/o', time.time(), 0, 'content-type', 'etag',
storage_policy_index=broker.storage_policy_index)
# replicate
node = {'device': 'sdc', 'replication_ip': '127.0.0.1'}
daemon = replicator.ContainerReplicator({'per_diff': 1})
def _rsync_file(db_file, remote_file, **kwargs):
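            # fake a successful rsync by copying the db file into the
            # "remote" device's path under the test root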
remote_server, remote_path = remote_file.split('/', 1)
dest_path = os.path.join(self.root, remote_path)
shutil.copy(db_file, dest_path)
return True
daemon._rsync_file = _rsync_file
part, node = self._get_broker_part_node(remote_broker)
info = broker.get_replication_info()
success = daemon._repl_to_node(node, broker, part, info)
self.assertTrue(success)
# row merge
self.assertEqual(1, daemon.stats['remote_merge'])
local_info = self._get_broker(
'a', 'c', node_index=0).get_info()
remote_info = self._get_broker(
'a', 'c', node_index=1).get_info()
for k, v in local_info.items():
if k == 'id':
continue
self.assertEqual(remote_info[k], v,
"mismatch remote %s %r != %r" % (
k, remote_info[k], v))
def test_sync_remote_missing_one_rows(self):
put_timestamp = time.time()
# create "local" broker
broker = self._get_broker('a', 'c', node_index=0)
broker.initialize(put_timestamp, POLICIES.default.idx)
# create "remote" broker
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_broker.initialize(put_timestamp, POLICIES.default.idx)
        # add some rows to both dbs
for i in range(10):
put_timestamp = time.time()
for db in (broker, remote_broker):
path = '/a/c/o_%s' % i
db.put_object(path, put_timestamp, 0, 'content-type', 'etag',
storage_policy_index=db.storage_policy_index)
        # now add a row to the "local" broker only
broker.put_object('/a/c/o_missing', time.time(), 0,
'content-type', 'etag',
storage_policy_index=broker.storage_policy_index)
# replicate
daemon = replicator.ContainerReplicator({})
part, node = self._get_broker_part_node(remote_broker)
info = broker.get_replication_info()
success = daemon._repl_to_node(node, broker, part, info)
self.assertTrue(success)
# row merge
self.assertEqual(1, daemon.stats['diff'])
local_info = self._get_broker(
'a', 'c', node_index=0).get_info()
remote_info = self._get_broker(
'a', 'c', node_index=1).get_info()
for k, v in local_info.items():
if k == 'id':
continue
self.assertEqual(remote_info[k], v,
"mismatch remote %s %r != %r" % (
k, remote_info[k], v))
def test_sync_remote_can_not_keep_up(self):
put_timestamp = time.time()
# create "local" broker
broker = self._get_broker('a', 'c', node_index=0)
broker.initialize(put_timestamp, POLICIES.default.idx)
# create "remote" broker
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_broker.initialize(put_timestamp, POLICIES.default.idx)
        # add some rows to both dbs
for i in range(10):
put_timestamp = time.time()
for db in (broker, remote_broker):
obj_name = 'o_%s' % i
db.put_object(obj_name, put_timestamp, 0,
'content-type', 'etag',
storage_policy_index=db.storage_policy_index)
# setup REPLICATE callback to simulate adding rows during merge_items
missing_counter = itertools.count()
def put_more_objects(op, *args):
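            # REPLICATE hook: each merge_items call adds another object row
            # to the local broker, simulating writes racing with replication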
if op != 'merge_items':
return
path = '/a/c/o_missing_%s' % next(missing_counter)
broker.put_object(path, time.time(), 0, 'content-type', 'etag',
storage_policy_index=db.storage_policy_index)
test_db_replicator.FakeReplConnection = \
test_db_replicator.attach_fake_replication_rpc(
self.rpc, replicate_hook=put_more_objects)
db_replicator.ReplConnection = test_db_replicator.FakeReplConnection
# and add one extra to local db to trigger merge_items
put_more_objects('merge_items')
# limit number of times we'll call merge_items
daemon = replicator.ContainerReplicator({'max_diffs': 10})
# replicate
part, node = self._get_broker_part_node(remote_broker)
info = broker.get_replication_info()
success = daemon._repl_to_node(node, broker, part, info)
self.assertFalse(success)
# back off on the PUTs during replication...
FakeReplConnection = test_db_replicator.attach_fake_replication_rpc(
self.rpc, replicate_hook=None)
db_replicator.ReplConnection = FakeReplConnection
# retry replication
info = broker.get_replication_info()
success = daemon._repl_to_node(node, broker, part, info)
self.assertTrue(success)
# row merge
self.assertEqual(2, daemon.stats['diff'])
self.assertEqual(1, daemon.stats['diff_capped'])
local_info = self._get_broker(
'a', 'c', node_index=0).get_info()
remote_info = self._get_broker(
'a', 'c', node_index=1).get_info()
for k, v in local_info.items():
if k == 'id':
continue
self.assertEqual(remote_info[k], v,
"mismatch remote %s %r != %r" % (
k, remote_info[k], v))
def test_diff_capped_sync(self):
ts = (Timestamp(t).internal for t in
itertools.count(int(time.time())))
put_timestamp = next(ts)
        # start off with a local db that is way behind
broker = self._get_broker('a', 'c', node_index=0)
broker.initialize(put_timestamp, POLICIES.default.idx)
for i in range(50):
broker.put_object(
'o%s' % i, next(ts), 0, 'content-type-old', 'etag',
storage_policy_index=broker.storage_policy_index)
# remote primary db has all the new bits...
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_broker.initialize(put_timestamp, POLICIES.default.idx)
for i in range(100):
remote_broker.put_object(
'o%s' % i, next(ts), 0, 'content-type-new', 'etag',
storage_policy_index=remote_broker.storage_policy_index)
# except there's *one* tiny thing in our local broker that's newer
broker.put_object(
'o101', next(ts), 0, 'content-type-new', 'etag',
storage_policy_index=broker.storage_policy_index)
# setup daemon with smaller per_diff and max_diffs
part, node = self._get_broker_part_node(broker)
daemon = self._get_daemon(node, conf_updates={'per_diff': 10,
'max_diffs': 3})
self.assertEqual(daemon.per_diff, 10)
self.assertEqual(daemon.max_diffs, 3)
# run once and verify diff capped
self._run_once(node, daemon=daemon)
self.assertEqual(1, daemon.stats['diff'])
self.assertEqual(1, daemon.stats['diff_capped'])
# run again and verify fully synced
self._run_once(node, daemon=daemon)
self.assertEqual(1, daemon.stats['diff'])
self.assertEqual(0, daemon.stats['diff_capped'])
# now that we're synced the new item should be in remote db
remote_names = set()
for item in remote_broker.list_objects_iter(500, '', '', '', ''):
name, ts, size, content_type, etag = item
remote_names.add(name)
self.assertEqual(content_type, 'content-type-new')
self.assertTrue('o101' in remote_names)
self.assertEqual(len(remote_names), 101)
self.assertEqual(remote_broker.get_info()['object_count'], 101)
def test_sync_status_change(self):
# setup a local container
broker = self._get_broker('a', 'c', node_index=0)
put_timestamp = time.time()
broker.initialize(put_timestamp, POLICIES.default.idx)
# setup remote container
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_broker.initialize(put_timestamp, POLICIES.default.idx)
# delete local container
broker.delete_db(time.time())
# replicate
daemon = replicator.ContainerReplicator({})
part, node = self._get_broker_part_node(remote_broker)
info = broker.get_replication_info()
success = daemon._repl_to_node(node, broker, part, info)
# nothing to do
self.assertTrue(success)
self.assertEqual(1, daemon.stats['no_change'])
# status in sync
self.assertTrue(remote_broker.is_deleted())
info = broker.get_info()
remote_info = remote_broker.get_info()
self.assertTrue(Timestamp(remote_info['status_changed_at']) >
Timestamp(remote_info['put_timestamp']),
'remote status_changed_at (%s) is not '
'greater than put_timestamp (%s)' % (
remote_info['status_changed_at'],
remote_info['put_timestamp']))
self.assertTrue(Timestamp(remote_info['status_changed_at']) >
Timestamp(info['status_changed_at']),
'remote status_changed_at (%s) is not '
'greater than local status_changed_at (%s)' % (
remote_info['status_changed_at'],
info['status_changed_at']))
@contextmanager
def _wrap_merge_timestamps(self, broker, calls):
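        # patch broker.merge_timestamps to record each call in ``calls``
        # while still delegating to the original implementation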
def fake_merge_timestamps(*args, **kwargs):
calls.append(args[0])
orig_merge_timestamps(*args, **kwargs)
orig_merge_timestamps = broker.merge_timestamps
broker.merge_timestamps = fake_merge_timestamps
try:
yield True
finally:
broker.merge_timestamps = orig_merge_timestamps
def test_sync_merge_timestamps(self):
ts = (Timestamp(t).internal for t in
itertools.count(int(time.time())))
# setup a local container
broker = self._get_broker('a', 'c', node_index=0)
put_timestamp = next(ts)
broker.initialize(put_timestamp, POLICIES.default.idx)
# setup remote container
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_put_timestamp = next(ts)
remote_broker.initialize(remote_put_timestamp, POLICIES.default.idx)
# replicate, expect call to merge_timestamps on remote and local
daemon = replicator.ContainerReplicator({})
part, node = self._get_broker_part_node(remote_broker)
info = broker.get_replication_info()
local_calls = []
remote_calls = []
with self._wrap_merge_timestamps(broker, local_calls):
with self._wrap_merge_timestamps(broker, remote_calls):
success = daemon._repl_to_node(node, broker, part, info)
self.assertTrue(success)
self.assertEqual(1, len(remote_calls))
self.assertEqual(1, len(local_calls))
self.assertEqual(remote_put_timestamp,
broker.get_info()['put_timestamp'])
self.assertEqual(remote_put_timestamp,
remote_broker.get_info()['put_timestamp'])
# replicate again, no changes so expect no calls to merge_timestamps
info = broker.get_replication_info()
local_calls = []
remote_calls = []
with self._wrap_merge_timestamps(broker, local_calls):
with self._wrap_merge_timestamps(broker, remote_calls):
success = daemon._repl_to_node(node, broker, part, info)
self.assertTrue(success)
self.assertEqual(0, len(remote_calls))
self.assertEqual(0, len(local_calls))
self.assertEqual(remote_put_timestamp,
broker.get_info()['put_timestamp'])
self.assertEqual(remote_put_timestamp,
remote_broker.get_info()['put_timestamp'])
def test_sync_bogus_db_quarantines(self):
ts = (Timestamp(t).internal for t in
itertools.count(int(time.time())))
policy = random.choice(list(POLICIES))
# create "local" broker
local_broker = self._get_broker('a', 'c', node_index=0)
local_broker.initialize(next(ts), policy.idx)
# create "remote" broker
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_broker.initialize(next(ts), policy.idx)
db_path = local_broker.db_file
self.assertTrue(os.path.exists(db_path)) # sanity check
old_inode = os.stat(db_path).st_ino
_orig_get_info = backend.ContainerBroker.get_info
def fail_like_bad_db(broker):
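            # simulate a corrupt local db: raise the sqlite error produced
            # by a missing container_info table; other brokers work normally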
if broker.db_file == local_broker.db_file:
raise sqlite3.OperationalError("no such table: container_info")
else:
return _orig_get_info(broker)
part, node = self._get_broker_part_node(remote_broker)
with mock.patch('swift.container.backend.ContainerBroker.get_info',
fail_like_bad_db):
# Have the remote node replicate to local; local should see its
# corrupt DB, quarantine it, and act like the DB wasn't ever there
# in the first place.
daemon = self._run_once(node)
self.assertTrue(os.path.exists(db_path))
# Make sure we didn't just keep the old DB, but quarantined it and
# made a fresh copy.
new_inode = os.stat(db_path).st_ino
self.assertNotEqual(old_inode, new_inode)
self.assertEqual(daemon.stats['failure'], 0)
def _replication_scenarios(self, *scenarios, **kwargs):
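        # generator helper: yields (ts, policy, remote_policy, broker,
        # remote_broker) for each scenario; when the caller's loop resumes,
        # rows are added per the scenario, replication is run, policy
        # convergence is asserted, and the test env is reset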
remote_wins = kwargs.get('remote_wins', False)
# these tests are duplicated because of the differences in replication
# when row counts cause full rsync vs. merge
scenarios = scenarios or (
'no_row', 'local_row', 'remote_row', 'both_rows')
for scenario_name in scenarios:
ts = itertools.count(int(time.time()))
policy = random.choice(list(POLICIES))
remote_policy = random.choice(
[p for p in POLICIES if p is not policy])
broker = self._get_broker('a', 'c', node_index=0)
remote_broker = self._get_broker('a', 'c', node_index=1)
yield ts, policy, remote_policy, broker, remote_broker
# variations on different replication scenarios
variations = {
'no_row': (),
'local_row': (broker,),
'remote_row': (remote_broker,),
'both_rows': (broker, remote_broker),
}
dbs = variations[scenario_name]
obj_ts = next(ts)
for db in dbs:
db.put_object('/a/c/o', obj_ts, 0, 'content-type', 'etag',
storage_policy_index=db.storage_policy_index)
# replicate
part, node = self._get_broker_part_node(broker)
daemon = self._run_once(node)
self.assertEqual(0, daemon.stats['failure'])
# in sync
local_info = self._get_broker(
'a', 'c', node_index=0).get_info()
remote_info = self._get_broker(
'a', 'c', node_index=1).get_info()
if remote_wins:
expected = remote_policy.idx
err = 'local policy did not change to match remote ' \
'for replication row scenario %s' % scenario_name
else:
expected = policy.idx
err = 'local policy changed to match remote ' \
'for replication row scenario %s' % scenario_name
self.assertEqual(local_info['storage_policy_index'], expected, err)
self.assertEqual(remote_info['storage_policy_index'],
local_info['storage_policy_index'])
test_db_replicator.TestReplicatorSync.tearDown(self)
test_db_replicator.TestReplicatorSync.setUp(self)
def test_sync_local_create_policy_over_newer_remote_create(self):
for setup in self._replication_scenarios():
ts, policy, remote_policy, broker, remote_broker = setup
# create "local" broker
broker.initialize(next(ts), policy.idx)
# create "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
def test_sync_local_create_policy_over_newer_remote_delete(self):
for setup in self._replication_scenarios():
ts, policy, remote_policy, broker, remote_broker = setup
# create older "local" broker
broker.initialize(next(ts), policy.idx)
# create "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
# delete "remote" broker
remote_broker.delete_db(next(ts))
def test_sync_local_create_policy_over_older_remote_delete(self):
# remote_row & both_rows cases are covered by
# "test_sync_remote_half_delete_policy_over_newer_local_create"
for setup in self._replication_scenarios(
'no_row', 'local_row'):
ts, policy, remote_policy, broker, remote_broker = setup
# create older "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
# delete older "remote" broker
remote_broker.delete_db(next(ts))
# create "local" broker
broker.initialize(next(ts), policy.idx)
def test_sync_local_half_delete_policy_over_newer_remote_create(self):
# no_row & remote_row cases are covered by
# "test_sync_remote_create_policy_over_older_local_delete"
for setup in self._replication_scenarios('local_row', 'both_rows'):
ts, policy, remote_policy, broker, remote_broker = setup
# create older "local" broker
broker.initialize(next(ts), policy.idx)
# half delete older "local" broker
broker.delete_db(next(ts))
# create "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
def test_sync_local_recreate_policy_over_newer_remote_create(self):
for setup in self._replication_scenarios():
ts, policy, remote_policy, broker, remote_broker = setup
# create "local" broker
broker.initialize(next(ts), policy.idx)
# older recreate "local" broker
broker.delete_db(next(ts))
recreate_timestamp = next(ts)
broker.update_put_timestamp(recreate_timestamp)
broker.update_status_changed_at(recreate_timestamp)
# create "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
def test_sync_local_recreate_policy_over_older_remote_create(self):
for setup in self._replication_scenarios():
ts, policy, remote_policy, broker, remote_broker = setup
# create older "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
# create "local" broker
broker.initialize(next(ts), policy.idx)
# recreate "local" broker
broker.delete_db(next(ts))
recreate_timestamp = next(ts)
broker.update_put_timestamp(recreate_timestamp)
broker.update_status_changed_at(recreate_timestamp)
def test_sync_local_recreate_policy_over_newer_remote_delete(self):
for setup in self._replication_scenarios():
ts, policy, remote_policy, broker, remote_broker = setup
# create "local" broker
broker.initialize(next(ts), policy.idx)
# create "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
# recreate "local" broker
broker.delete_db(next(ts))
recreate_timestamp = next(ts)
broker.update_put_timestamp(recreate_timestamp)
broker.update_status_changed_at(recreate_timestamp)
# older delete "remote" broker
remote_broker.delete_db(next(ts))
def test_sync_local_recreate_policy_over_older_remote_delete(self):
for setup in self._replication_scenarios():
ts, policy, remote_policy, broker, remote_broker = setup
# create "local" broker
broker.initialize(next(ts), policy.idx)
# create "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
# older delete "remote" broker
remote_broker.delete_db(next(ts))
# recreate "local" broker
broker.delete_db(next(ts))
recreate_timestamp = next(ts)
broker.update_put_timestamp(recreate_timestamp)
broker.update_status_changed_at(recreate_timestamp)
def test_sync_local_recreate_policy_over_older_remote_recreate(self):
for setup in self._replication_scenarios():
ts, policy, remote_policy, broker, remote_broker = setup
# create "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
# create "local" broker
broker.initialize(next(ts), policy.idx)
# older recreate "remote" broker
remote_broker.delete_db(next(ts))
remote_recreate_timestamp = next(ts)
remote_broker.update_put_timestamp(remote_recreate_timestamp)
remote_broker.update_status_changed_at(remote_recreate_timestamp)
# recreate "local" broker
broker.delete_db(next(ts))
local_recreate_timestamp = next(ts)
broker.update_put_timestamp(local_recreate_timestamp)
broker.update_status_changed_at(local_recreate_timestamp)
def test_sync_remote_create_policy_over_newer_local_create(self):
for setup in self._replication_scenarios(remote_wins=True):
ts, policy, remote_policy, broker, remote_broker = setup
# create older "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
# create "local" broker
broker.initialize(next(ts), policy.idx)
def test_sync_remote_create_policy_over_newer_local_delete(self):
for setup in self._replication_scenarios(remote_wins=True):
ts, policy, remote_policy, broker, remote_broker = setup
# create older "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
# create "local" broker
broker.initialize(next(ts), policy.idx)
# delete "local" broker
broker.delete_db(next(ts))
def test_sync_remote_create_policy_over_older_local_delete(self):
# local_row & both_rows cases are covered by
# "test_sync_local_half_delete_policy_over_newer_remote_create"
for setup in self._replication_scenarios(
'no_row', 'remote_row', remote_wins=True):
ts, policy, remote_policy, broker, remote_broker = setup
# create older "local" broker
broker.initialize(next(ts), policy.idx)
# delete older "local" broker
broker.delete_db(next(ts))
# create "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
def test_sync_remote_half_delete_policy_over_newer_local_create(self):
# no_row & both_rows cases are covered by
# "test_sync_local_create_policy_over_older_remote_delete"
for setup in self._replication_scenarios('remote_row', 'both_rows',
remote_wins=True):
ts, policy, remote_policy, broker, remote_broker = setup
# create older "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
# half delete older "remote" broker
remote_broker.delete_db(next(ts))
# create "local" broker
broker.initialize(next(ts), policy.idx)
def test_sync_remote_recreate_policy_over_newer_local_create(self):
for setup in self._replication_scenarios(remote_wins=True):
ts, policy, remote_policy, broker, remote_broker = setup
# create "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
# older recreate "remote" broker
remote_broker.delete_db(next(ts))
recreate_timestamp = next(ts)
remote_broker.update_put_timestamp(recreate_timestamp)
remote_broker.update_status_changed_at(recreate_timestamp)
# create "local" broker
broker.initialize(next(ts), policy.idx)
def test_sync_remote_recreate_policy_over_older_local_create(self):
for setup in self._replication_scenarios(remote_wins=True):
ts, policy, remote_policy, broker, remote_broker = setup
# create older "local" broker
broker.initialize(next(ts), policy.idx)
# create "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
# recreate "remote" broker
remote_broker.delete_db(next(ts))
recreate_timestamp = next(ts)
remote_broker.update_put_timestamp(recreate_timestamp)
remote_broker.update_status_changed_at(recreate_timestamp)
def test_sync_remote_recreate_policy_over_newer_local_delete(self):
for setup in self._replication_scenarios(remote_wins=True):
ts, policy, remote_policy, broker, remote_broker = setup
# create "local" broker
broker.initialize(next(ts), policy.idx)
# create "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
# recreate "remote" broker
remote_broker.delete_db(next(ts))
remote_recreate_timestamp = next(ts)
remote_broker.update_put_timestamp(remote_recreate_timestamp)
remote_broker.update_status_changed_at(remote_recreate_timestamp)
# older delete "local" broker
broker.delete_db(next(ts))
def test_sync_remote_recreate_policy_over_older_local_delete(self):
for setup in self._replication_scenarios(remote_wins=True):
ts, policy, remote_policy, broker, remote_broker = setup
# create "local" broker
broker.initialize(next(ts), policy.idx)
# create "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
# older delete "local" broker
broker.delete_db(next(ts))
# recreate "remote" broker
remote_broker.delete_db(next(ts))
remote_recreate_timestamp = next(ts)
remote_broker.update_put_timestamp(remote_recreate_timestamp)
remote_broker.update_status_changed_at(remote_recreate_timestamp)
def test_sync_remote_recreate_policy_over_older_local_recreate(self):
for setup in self._replication_scenarios(remote_wins=True):
ts, policy, remote_policy, broker, remote_broker = setup
# create older "local" broker
broker.initialize(next(ts), policy.idx)
# create "remote" broker
remote_broker.initialize(next(ts), remote_policy.idx)
# older recreate "local" broker
broker.delete_db(next(ts))
local_recreate_timestamp = next(ts)
broker.update_put_timestamp(local_recreate_timestamp)
broker.update_status_changed_at(local_recreate_timestamp)
# recreate "remote" broker
remote_broker.delete_db(next(ts))
remote_recreate_timestamp = next(ts)
remote_broker.update_put_timestamp(remote_recreate_timestamp)
remote_broker.update_status_changed_at(remote_recreate_timestamp)
def test_sync_to_remote_with_misplaced(self):
ts = (Timestamp(t).internal for t in
itertools.count(int(time.time())))
# create "local" broker
policy = random.choice(list(POLICIES))
broker = self._get_broker('a', 'c', node_index=0)
broker.initialize(next(ts), policy.idx)
# create "remote" broker
remote_policy = random.choice([p for p in POLICIES if p is not
policy])
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_broker.initialize(next(ts), remote_policy.idx)
# add misplaced row to remote_broker
remote_broker.put_object(
'/a/c/o', next(ts), 0, 'content-type',
'etag', storage_policy_index=remote_broker.storage_policy_index)
        # since this row matches policy index of remote, it shows up in count
self.assertEqual(remote_broker.get_info()['object_count'], 1)
self.assertEqual([], remote_broker.get_misplaced_since(-1, 1))
# replicate
part, node = self._get_broker_part_node(broker)
daemon = self._run_once(node)
# since our local broker has no rows to push it logs as no_change
self.assertEqual(1, daemon.stats['no_change'])
self.assertEqual(0, broker.get_info()['object_count'])
        # remote broker updates its policy index; this makes the remote
# broker's object count change
info = remote_broker.get_info()
expectations = {
'object_count': 0,
'storage_policy_index': policy.idx,
}
for key, value in expectations.items():
self.assertEqual(info[key], value)
# but it also knows those objects are misplaced now
misplaced = remote_broker.get_misplaced_since(-1, 100)
self.assertEqual(len(misplaced), 1)
# we also pushed out to node 3 with rsync
self.assertEqual(1, daemon.stats['rsync'])
third_broker = self._get_broker('a', 'c', node_index=2)
info = third_broker.get_info()
for key, value in expectations.items():
self.assertEqual(info[key], value)
def test_misplaced_rows_replicate_and_enqueue(self):
# force all timestamps to fall in same hour
ts = (Timestamp(t) for t in
itertools.count(int(time.time()) // 3600 * 3600))
policy = random.choice(list(POLICIES))
broker = self._get_broker('a', 'c', node_index=0)
broker.initialize(next(ts).internal, policy.idx)
remote_policy = random.choice([p for p in POLICIES if p is not
policy])
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_broker.initialize(next(ts).internal, remote_policy.idx)
# add a misplaced row to *local* broker
obj_put_timestamp = next(ts).internal
broker.put_object(
'o', obj_put_timestamp, 0, 'content-type',
'etag', storage_policy_index=remote_policy.idx)
misplaced = broker.get_misplaced_since(-1, 10)
self.assertEqual(len(misplaced), 1)
# since this row is misplaced it doesn't show up in count
self.assertEqual(broker.get_info()['object_count'], 0)
# add another misplaced row to *local* broker with composite timestamp
ts_data = next(ts)
ts_ctype = next(ts)
ts_meta = next(ts)
broker.put_object(
'o2', ts_data.internal, 0, 'content-type',
'etag', storage_policy_index=remote_policy.idx,
ctype_timestamp=ts_ctype.internal, meta_timestamp=ts_meta.internal)
misplaced = broker.get_misplaced_since(-1, 10)
self.assertEqual(len(misplaced), 2)
# since this row is misplaced it doesn't show up in count
self.assertEqual(broker.get_info()['object_count'], 0)
# replicate
part, node = self._get_broker_part_node(broker)
daemon = self._run_once(node)
# push to remote, and third node was missing (also maybe reconciler)
self.assertTrue(2 < daemon.stats['rsync'] <= 3, daemon.stats['rsync'])
self.assertEqual(
1, self.logger.statsd_client.get_stats_counts().get(
'reconciler_db_created'))
self.assertFalse(
self.logger.statsd_client.get_stats_counts().get(
'reconciler_db_exists'))
# grab the rsynced instance of remote_broker
remote_broker = self._get_broker('a', 'c', node_index=1)
# remote has misplaced rows too now
misplaced = remote_broker.get_misplaced_since(-1, 10)
self.assertEqual(len(misplaced), 2)
# and the correct policy_index and object_count
info = remote_broker.get_info()
expectations = {
'object_count': 0,
'storage_policy_index': policy.idx,
}
for key, value in expectations.items():
self.assertEqual(info[key], value)
# and we should have also enqueued these rows in a single reconciler,
# since we forced the object timestamps to be in the same hour.
self.logger.clear()
reconciler = daemon.get_reconciler_broker(misplaced[0]['created_at'])
self.assertFalse(
self.logger.statsd_client.get_stats_counts().get(
'reconciler_db_created'))
self.assertEqual(
1, self.logger.statsd_client.get_stats_counts().get(
'reconciler_db_exists'))
# but it may not be on the same node as us anymore though...
reconciler = self._get_broker(reconciler.account,
reconciler.container, node_index=0)
self.assertEqual(reconciler.get_info()['object_count'], 2)
objects = reconciler.list_objects_iter(
10, '', None, None, None, None, storage_policy_index=0)
self.assertEqual(len(objects), 2)
expected = ('%s:/a/c/o' % remote_policy.idx, obj_put_timestamp, 0,
'application/x-put', obj_put_timestamp)
self.assertEqual(objects[0], expected)
# the second object's listing has ts_meta as its last modified time
# but its full composite timestamp is in the hash field.
expected = ('%s:/a/c/o2' % remote_policy.idx, ts_meta.internal, 0,
'application/x-put',
encode_timestamps(ts_data, ts_ctype, ts_meta))
self.assertEqual(objects[1], expected)
# having safely enqueued to the reconciler we can advance
# our sync pointer
self.assertEqual(broker.get_reconciler_sync(), 2)
def test_misplaced_rows_replicate_and_enqueue_from_old_style_shard(self):
# force all timestamps to fall in same hour
ts = (Timestamp(t) for t in
itertools.count(int(time.time()) // 3600 * 3600))
policy = random.choice(list(POLICIES))
broker = self._get_broker('.shards_a', 'some-other-c', node_index=0)
broker.initialize(next(ts).internal, policy.idx)
broker.set_sharding_sysmeta('Root', 'a/c')
remote_policy = random.choice([p for p in POLICIES if p is not
policy])
remote_broker = self._get_broker(
'.shards_a', 'some-other-c', node_index=1)
remote_broker.initialize(next(ts).internal, remote_policy.idx)
# add a misplaced row to *local* broker
obj_put_timestamp = next(ts).internal
broker.put_object(
'o', obj_put_timestamp, 0, 'content-type',
'etag', storage_policy_index=remote_policy.idx)
misplaced = broker.get_misplaced_since(-1, 10)
self.assertEqual(len(misplaced), 1)
# since this row is misplaced it doesn't show up in count
self.assertEqual(broker.get_info()['object_count'], 0)
# add another misplaced row to *local* broker with composite timestamp
ts_data = next(ts)
ts_ctype = next(ts)
ts_meta = next(ts)
broker.put_object(
'o2', ts_data.internal, 0, 'content-type',
'etag', storage_policy_index=remote_policy.idx,
ctype_timestamp=ts_ctype.internal, meta_timestamp=ts_meta.internal)
misplaced = broker.get_misplaced_since(-1, 10)
self.assertEqual(len(misplaced), 2)
# since this row is misplaced it doesn't show up in count
self.assertEqual(broker.get_info()['object_count'], 0)
# replicate
part, node = self._get_broker_part_node(broker)
daemon = self._run_once(node)
# push to remote, and third node was missing (also maybe reconciler)
self.assertTrue(2 < daemon.stats['rsync'] <= 3, daemon.stats['rsync'])
# grab the rsynced instance of remote_broker
remote_broker = self._get_broker(
'.shards_a', 'some-other-c', node_index=1)
# remote has misplaced rows too now
misplaced = remote_broker.get_misplaced_since(-1, 10)
self.assertEqual(len(misplaced), 2)
# and the correct policy_index and object_count
info = remote_broker.get_info()
expectations = {
'object_count': 0,
'storage_policy_index': policy.idx,
}
for key, value in expectations.items():
self.assertEqual(info[key], value)
# and we should have also enqueued these rows in a single reconciler,
# since we forced the object timestamps to be in the same hour.
reconciler = daemon.get_reconciler_broker(misplaced[0]['created_at'])
# but it may not be on the same node as us anymore though...
reconciler = self._get_broker(reconciler.account,
reconciler.container, node_index=0)
self.assertEqual(reconciler.get_info()['object_count'], 2)
objects = reconciler.list_objects_iter(
10, '', None, None, None, None, storage_policy_index=0)
self.assertEqual(len(objects), 2)
# NB: reconciler work is for the *root* container!
expected = ('%s:/a/c/o' % remote_policy.idx, obj_put_timestamp, 0,
'application/x-put', obj_put_timestamp)
self.assertEqual(objects[0], expected)
# the second object's listing has ts_meta as its last modified time
# but its full composite timestamp is in the hash field.
expected = ('%s:/a/c/o2' % remote_policy.idx, ts_meta.internal, 0,
'application/x-put',
encode_timestamps(ts_data, ts_ctype, ts_meta))
self.assertEqual(objects[1], expected)
# having safely enqueued to the reconciler we can advance
# our sync pointer
self.assertEqual(broker.get_reconciler_sync(), 2)
def test_misplaced_rows_replicate_and_enqueue_from_shard(self):
# force all timestamps to fall in same hour
ts = (Timestamp(t) for t in
itertools.count(int(time.time()) // 3600 * 3600))
policy = random.choice(list(POLICIES))
broker = self._get_broker('.shards_a', 'some-other-c', node_index=0)
broker.initialize(next(ts).internal, policy.idx)
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
remote_policy = random.choice([p for p in POLICIES if p is not
policy])
remote_broker = self._get_broker(
'.shards_a', 'some-other-c', node_index=1)
remote_broker.initialize(next(ts).internal, remote_policy.idx)
# add a misplaced row to *local* broker
obj_put_timestamp = next(ts).internal
broker.put_object(
'o', obj_put_timestamp, 0, 'content-type',
'etag', storage_policy_index=remote_policy.idx)
misplaced = broker.get_misplaced_since(-1, 10)
self.assertEqual(len(misplaced), 1)
# since this row is misplaced it doesn't show up in count
self.assertEqual(broker.get_info()['object_count'], 0)
# add another misplaced row to *local* broker with composite timestamp
ts_data = next(ts)
ts_ctype = next(ts)
ts_meta = next(ts)
broker.put_object(
'o2', ts_data.internal, 0, 'content-type',
'etag', storage_policy_index=remote_policy.idx,
ctype_timestamp=ts_ctype.internal, meta_timestamp=ts_meta.internal)
misplaced = broker.get_misplaced_since(-1, 10)
self.assertEqual(len(misplaced), 2)
        # since these rows are misplaced they don't show up in count
self.assertEqual(broker.get_info()['object_count'], 0)
# replicate
part, node = self._get_broker_part_node(broker)
daemon = self._run_once(node)
# push to remote, and third node was missing (also maybe reconciler)
self.assertTrue(2 < daemon.stats['rsync'] <= 3, daemon.stats['rsync'])
# grab the rsynced instance of remote_broker
remote_broker = self._get_broker(
'.shards_a', 'some-other-c', node_index=1)
# remote has misplaced rows too now
misplaced = remote_broker.get_misplaced_since(-1, 10)
self.assertEqual(len(misplaced), 2)
# and the correct policy_index and object_count
info = remote_broker.get_info()
expectations = {
'object_count': 0,
'storage_policy_index': policy.idx,
}
for key, value in expectations.items():
self.assertEqual(info[key], value)
# and we should have also enqueued these rows in a single reconciler,
# since we forced the object timestamps to be in the same hour.
reconciler = daemon.get_reconciler_broker(misplaced[0]['created_at'])
# but it may not be on the same node as us anymore though...
reconciler = self._get_broker(reconciler.account,
reconciler.container, node_index=0)
self.assertEqual(reconciler.get_info()['object_count'], 2)
objects = reconciler.list_objects_iter(
10, '', None, None, None, None, storage_policy_index=0)
self.assertEqual(len(objects), 2)
# NB: reconciler work is for the *root* container!
expected = ('%s:/a/c/o' % remote_policy.idx, obj_put_timestamp, 0,
'application/x-put', obj_put_timestamp)
self.assertEqual(objects[0], expected)
# the second object's listing has ts_meta as its last modified time
# but its full composite timestamp is in the hash field.
expected = ('%s:/a/c/o2' % remote_policy.idx, ts_meta.internal, 0,
'application/x-put',
encode_timestamps(ts_data, ts_ctype, ts_meta))
self.assertEqual(objects[1], expected)
# having safely enqueued to the reconciler we can advance
# our sync pointer
self.assertEqual(broker.get_reconciler_sync(), 2)
def test_multiple_out_sync_reconciler_enqueue_normalize(self):
ts = (Timestamp(t).internal for t in
itertools.count(int(time.time())))
policy = random.choice(list(POLICIES))
broker = self._get_broker('a', 'c', node_index=0)
broker.initialize(next(ts), policy.idx)
remote_policy = random.choice([p for p in POLICIES if p is not
policy])
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_broker.initialize(next(ts), remote_policy.idx)
# add some rows to brokers
for db in (broker, remote_broker):
for p in (policy, remote_policy):
db.put_object('o-%s' % p.name, next(ts), 0, 'content-type',
'etag', storage_policy_index=p.idx)
db._commit_puts()
expected_policy_stats = {
policy.idx: {'object_count': 1, 'bytes_used': 0},
remote_policy.idx: {'object_count': 1, 'bytes_used': 0},
}
for db in (broker, remote_broker):
policy_stats = db.get_policy_stats()
self.assertEqual(policy_stats, expected_policy_stats)
# each db has 2 rows, 4 total
all_items = set()
for db in (broker, remote_broker):
items = db.get_items_since(-1, 4)
all_items.update(
(item['name'], item['created_at']) for item in items)
self.assertEqual(4, len(all_items))
# replicate both ways
part, node = self._get_broker_part_node(broker)
self._run_once(node)
part, node = self._get_broker_part_node(remote_broker)
self._run_once(node)
# only the latest timestamps should survive
most_recent_items = {}
for name, timestamp in all_items:
most_recent_items[name] = max(
timestamp, most_recent_items.get(name, ''))
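        # (Note: row merging during replication is expected to keep, for each
        # object name, the record with the newest created_at timestamp.)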
self.assertEqual(2, len(most_recent_items))
for db in (broker, remote_broker):
items = db.get_items_since(-1, 4)
self.assertEqual(len(items), len(most_recent_items))
for item in items:
self.assertEqual(most_recent_items[item['name']],
item['created_at'])
# and the reconciler also collapses updates
reconciler_containers = set()
for item in all_items:
_name, timestamp = item
reconciler_containers.add(
get_reconciler_container_name(timestamp))
reconciler_items = set()
for reconciler_container in reconciler_containers:
for node_index in range(3):
reconciler = self._get_broker(MISPLACED_OBJECTS_ACCOUNT,
reconciler_container,
node_index=node_index)
items = reconciler.get_items_since(-1, 4)
reconciler_items.update(
(item['name'], item['created_at']) for item in items)
# they can't *both* be in the wrong policy ;)
self.assertEqual(1, len(reconciler_items))
for reconciler_name, timestamp in reconciler_items:
_policy_index, path = reconciler_name.split(':', 1)
a, c, name = path.lstrip('/').split('/')
self.assertEqual(most_recent_items[name], timestamp)
@contextmanager
def _wrap_update_reconciler_sync(self, broker, calls):
def wrapper_function(*args, **kwargs):
calls.append(args)
orig_function(*args, **kwargs)
orig_function = broker.update_reconciler_sync
broker.update_reconciler_sync = wrapper_function
try:
yield True
finally:
broker.update_reconciler_sync = orig_function
def test_post_replicate_hook(self):
ts = (Timestamp(t).internal for t in
itertools.count(int(time.time())))
broker = self._get_broker('a', 'c', node_index=0)
broker.initialize(next(ts), 0)
broker.put_object('foo', next(ts), 0, 'text/plain', 'xyz', deleted=0,
storage_policy_index=0)
info = broker.get_replication_info()
self.assertEqual(1, info['max_row'])
self.assertEqual(-1, broker.get_reconciler_sync())
daemon = replicator.ContainerReplicator({})
calls = []
with self._wrap_update_reconciler_sync(broker, calls):
daemon._post_replicate_hook(broker, info, [])
self.assertEqual(1, len(calls))
# repeated call to _post_replicate_hook with no change to info
# should not call update_reconciler_sync
calls = []
with self._wrap_update_reconciler_sync(broker, calls):
daemon._post_replicate_hook(broker, info, [])
self.assertEqual(0, len(calls))
def test_update_sync_store_exception(self):
class FakeContainerSyncStore(object):
def update_sync_store(self, broker):
raise OSError(1, '1')
daemon = replicator.ContainerReplicator({}, logger=self.logger)
daemon.sync_store = FakeContainerSyncStore()
ts_iter = make_timestamp_iter()
broker = self._get_broker('a', 'c', node_index=0)
timestamp = next(ts_iter)
broker.initialize(timestamp.internal, POLICIES.default.idx)
info = broker.get_replication_info()
daemon._post_replicate_hook(broker, info, [])
log_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(log_lines))
self.assertIn('Failed to update sync_store', log_lines[0])
def test_update_sync_store(self):
klass = 'swift.container.sync_store.ContainerSyncStore'
daemon = replicator.ContainerReplicator({})
daemon.sync_store = sync_store.ContainerSyncStore(
daemon.root, daemon.logger, daemon.mount_check)
ts_iter = make_timestamp_iter()
broker = self._get_broker('a', 'c', node_index=0)
timestamp = next(ts_iter)
broker.initialize(timestamp.internal, POLICIES.default.idx)
info = broker.get_replication_info()
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
daemon._post_replicate_hook(broker, info, [])
self.assertEqual(0, mock_remove.call_count)
self.assertEqual(0, mock_add.call_count)
timestamp = next(ts_iter)
# sync-to and sync-key empty - remove from store
broker.update_metadata(
{'X-Container-Sync-To': ('', timestamp.internal),
'X-Container-Sync-Key': ('', timestamp.internal)})
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
daemon._post_replicate_hook(broker, info, [])
self.assertEqual(0, mock_add.call_count)
mock_remove.assert_called_once_with(broker)
timestamp = next(ts_iter)
# sync-to is not empty sync-key is empty - remove from store
broker.update_metadata(
{'X-Container-Sync-To': ('a', timestamp.internal)})
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
daemon._post_replicate_hook(broker, info, [])
self.assertEqual(0, mock_add.call_count)
mock_remove.assert_called_once_with(broker)
timestamp = next(ts_iter)
# sync-to is empty sync-key is not empty - remove from store
broker.update_metadata(
{'X-Container-Sync-To': ('', timestamp.internal),
'X-Container-Sync-Key': ('secret', timestamp.internal)})
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
daemon._post_replicate_hook(broker, info, [])
self.assertEqual(0, mock_add.call_count)
mock_remove.assert_called_once_with(broker)
timestamp = next(ts_iter)
# sync-to, sync-key both not empty - add to store
broker.update_metadata(
{'X-Container-Sync-To': ('a', timestamp.internal),
'X-Container-Sync-Key': ('secret', timestamp.internal)})
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
daemon._post_replicate_hook(broker, info, [])
mock_add.assert_called_once_with(broker)
self.assertEqual(0, mock_remove.call_count)
timestamp = next(ts_iter)
# container is removed - need to remove from store
broker.delete_db(timestamp.internal)
broker.update_metadata(
{'X-Container-Sync-To': ('a', timestamp.internal),
'X-Container-Sync-Key': ('secret', timestamp.internal)})
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
daemon._post_replicate_hook(broker, info, [])
self.assertEqual(0, mock_add.call_count)
mock_remove.assert_called_once_with(broker)
def test_sync_triggers_sync_store_update(self):
klass = 'swift.container.sync_store.ContainerSyncStore'
ts_iter = make_timestamp_iter()
# Create two containers as follows:
# broker_1 which is not set for sync
# broker_2 which is set for sync and then unset
# test that while replicating both we see no activity
# for broker_1, and the anticipated activity for broker_2
broker_1 = self._get_broker('a', 'c', node_index=0)
broker_1.initialize(next(ts_iter).internal, POLICIES.default.idx)
broker_2 = self._get_broker('b', 'd', node_index=0)
broker_2.initialize(next(ts_iter).internal, POLICIES.default.idx)
broker_2.update_metadata(
{'X-Container-Sync-To': ('a', next(ts_iter).internal),
'X-Container-Sync-Key': ('secret', next(ts_iter).internal)})
# replicate once according to broker_1
# relying on the fact that FakeRing would place both
# in the same partition.
part, node = self._get_broker_part_node(broker_1)
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
self._run_once(node)
self.assertEqual(1, mock_add.call_count)
self.assertEqual(broker_2.db_file, mock_add.call_args[0][0].db_file)
self.assertEqual(0, mock_remove.call_count)
broker_2.update_metadata(
{'X-Container-Sync-To': ('', next(ts_iter).internal)})
# replicate once this time according to broker_2
# relying on the fact that FakeRing would place both
# in the same partition.
part, node = self._get_broker_part_node(broker_2)
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
self._run_once(node)
self.assertEqual(0, mock_add.call_count)
self.assertEqual(1, mock_remove.call_count)
self.assertEqual(broker_2.db_file, mock_remove.call_args[0][0].db_file)
def test_cleanup_post_replicate(self):
broker = self._get_broker('a', 'c', node_index=0)
put_timestamp = Timestamp.now()
broker.initialize(put_timestamp.internal, POLICIES.default.idx)
orig_info = broker.get_replication_info()
daemon = replicator.ContainerReplicator({}, logger=self.logger)
# db should not be here, replication ok, deleted
res = daemon.cleanup_post_replicate(broker, orig_info, [True] * 3)
self.assertTrue(res)
self.assertFalse(os.path.exists(broker.db_file))
self.assertEqual(['Successfully deleted db %s' % broker.db_file],
daemon.logger.get_lines_for_level('debug'))
daemon.logger.clear()
# failed replication, not deleted
broker.initialize(put_timestamp.internal, POLICIES.default.idx)
orig_info = broker.get_replication_info()
res = daemon.cleanup_post_replicate(broker, orig_info,
[False, True, True])
self.assertTrue(res)
self.assertTrue(os.path.exists(broker.db_file))
self.assertEqual(['Not deleting db %s (2/3 success)' % broker.db_file],
daemon.logger.get_lines_for_level('debug'))
daemon.logger.clear()
# db has shard ranges, not deleted
broker.enable_sharding(Timestamp.now())
broker.merge_shard_ranges(
[ShardRange('.shards_a/c', Timestamp.now(), '', 'm')])
self.assertTrue(broker.sharding_required()) # sanity check
res = daemon.cleanup_post_replicate(broker, orig_info, [True] * 3)
self.assertTrue(res)
self.assertTrue(os.path.exists(broker.db_file))
self.assertEqual(
['Not deleting db %s (requires sharding, state unsharded)' %
broker.db_file],
daemon.logger.get_lines_for_level('debug'))
daemon.logger.clear()
# db sharding, not deleted
self._goto_sharding_state(broker, Timestamp.now())
self.assertTrue(broker.sharding_required()) # sanity check
orig_info = broker.get_replication_info()
res = daemon.cleanup_post_replicate(broker, orig_info, [True] * 3)
self.assertTrue(res)
self.assertTrue(os.path.exists(broker.db_file))
self.assertEqual(
['Not deleting db %s (requires sharding, state sharding)' %
broker.db_file],
daemon.logger.get_lines_for_level('debug'))
daemon.logger.clear()
# db sharded, should not be here, failed replication, not deleted
self._goto_sharded_state(broker)
self.assertFalse(broker.sharding_required()) # sanity check
res = daemon.cleanup_post_replicate(broker, orig_info,
[True, False, True])
self.assertTrue(res)
self.assertTrue(os.path.exists(broker.db_file))
self.assertEqual(['Not deleting db %s (2/3 success)' %
broker.db_file],
daemon.logger.get_lines_for_level('debug'))
daemon.logger.clear()
# db sharded, should not be here, new shard ranges (e.g. from reverse
# replication), deleted
broker.merge_shard_ranges(
[ShardRange('.shards_a/c', Timestamp.now(), '', 'm')])
res = daemon.cleanup_post_replicate(broker, orig_info, [True] * 3)
self.assertTrue(res)
self.assertFalse(os.path.exists(broker.db_file))
daemon.logger.clear()
# db sharded, should not be here, replication ok, deleted
broker.initialize(put_timestamp.internal, POLICIES.default.idx)
self.assertTrue(os.path.exists(broker.db_file))
orig_info = broker.get_replication_info()
res = daemon.cleanup_post_replicate(broker, orig_info, [True] * 3)
self.assertTrue(res)
self.assertFalse(os.path.exists(broker.db_file))
self.assertEqual(['Successfully deleted db %s' % broker.db_file],
daemon.logger.get_lines_for_level('debug'))
daemon.logger.clear()
def test_sync_shard_ranges(self):
put_timestamp = Timestamp.now().internal
# create "local" broker
broker = self._get_broker('a', 'c', node_index=0)
broker.initialize(put_timestamp, POLICIES.default.idx)
# create "remote" broker
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_broker.initialize(put_timestamp, POLICIES.default.idx)
def check_replicate(expected_shard_ranges, from_broker, to_broker):
daemon = replicator.ContainerReplicator({}, logger=debug_logger())
part, node = self._get_broker_part_node(to_broker)
info = broker.get_replication_info()
success = daemon._repl_to_node(node, from_broker, part, info)
self.assertTrue(success)
self.assertEqual(
expected_shard_ranges,
to_broker.get_all_shard_range_data()
)
local_info = self._get_broker(
'a', 'c', node_index=0).get_info()
remote_info = self._get_broker(
'a', 'c', node_index=1).get_info()
for k, v in local_info.items():
if k == 'id':
continue
self.assertEqual(remote_info[k], v,
"mismatch remote %s %r != %r" % (
k, remote_info[k], v))
return daemon
bounds = (('', 'g'), ('g', 'r'), ('r', ''))
shard_ranges = [
ShardRange('.shards_a/sr-%s' % upper, Timestamp.now(), lower,
upper, i + 1, 10 * (i + 1))
for i, (lower, upper) in enumerate(bounds)
]
# add first two shard_ranges to both brokers
for shard_range in shard_ranges[:2]:
for db in (broker, remote_broker):
db.merge_shard_ranges(shard_range)
# now add a shard range and an object to the "local" broker only
broker.merge_shard_ranges(shard_ranges[2])
broker_ranges = broker.get_all_shard_range_data()
self.assertShardRangesEqual(shard_ranges, broker_ranges)
broker.put_object('obj', Timestamp.now().internal, 0, 'text/plain',
EMPTY_ETAG)
# sharding not yet enabled so replication not deferred
daemon = check_replicate(broker_ranges, broker, remote_broker)
self.assertEqual(0, daemon.stats['deferred'])
self.assertEqual(0, daemon.stats['no_change'])
self.assertEqual(0, daemon.stats['rsync'])
self.assertEqual(1, daemon.stats['diff'])
self.assertEqual({'diffs': 1},
daemon.logger.statsd_client.get_increment_counts())
# update one shard range
shard_ranges[1].update_meta(50, 50)
# sharding not yet enabled so replication not deferred, but the two
# brokers' object tables are in sync so no rsync or usync either
daemon = check_replicate(broker_ranges, broker, remote_broker)
self.assertEqual(0, daemon.stats['deferred'])
self.assertEqual(1, daemon.stats['no_change'])
self.assertEqual(0, daemon.stats['rsync'])
self.assertEqual(0, daemon.stats['diff'])
self.assertEqual({'no_changes': 1},
daemon.logger.statsd_client.get_increment_counts())
# now enable local broker for sharding
own_sr = broker.enable_sharding(Timestamp.now())
# update one shard range
shard_ranges[1].update_meta(13, 123)
broker.merge_shard_ranges(shard_ranges[1])
broker_ranges = broker.get_all_shard_range_data()
self.assertShardRangesEqual(shard_ranges + [own_sr], broker_ranges)
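        # (Note: once the broker has an own shard range and sharding is
        # enabled, object-row replication is expected to be deferred to the
        # sharder; only shard ranges are sync'd, hence the 'deferred' stat
        # checked below.)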
def check_stats(daemon):
self.assertEqual(1, daemon.stats['deferred'])
self.assertEqual(0, daemon.stats['no_change'])
self.assertEqual(0, daemon.stats['rsync'])
self.assertEqual(0, daemon.stats['diff'])
self.assertFalse(daemon.logger.statsd_client.get_increments())
daemon = check_replicate(broker_ranges, broker, remote_broker)
check_stats(daemon)
# update one shard range
shard_ranges[1].update_meta(99, 0)
broker.merge_shard_ranges(shard_ranges[1])
# sanity check
broker_ranges = broker.get_all_shard_range_data()
self.assertShardRangesEqual(shard_ranges + [own_sr], broker_ranges)
daemon = check_replicate(broker_ranges, broker, remote_broker)
check_stats(daemon)
# delete one shard range
shard_ranges[0].deleted = 1
shard_ranges[0].timestamp = Timestamp.now()
broker.merge_shard_ranges(shard_ranges[0])
# sanity check
broker_ranges = broker.get_all_shard_range_data()
self.assertShardRangesEqual(shard_ranges + [own_sr], broker_ranges)
daemon = check_replicate(broker_ranges, broker, remote_broker)
check_stats(daemon)
# put a shard range again
shard_ranges[2].timestamp = Timestamp.now()
shard_ranges[2].object_count = 0
broker.merge_shard_ranges(shard_ranges[2])
# sanity check
broker_ranges = broker.get_all_shard_range_data()
self.assertShardRangesEqual(shard_ranges + [own_sr], broker_ranges)
daemon = check_replicate(broker_ranges, broker, remote_broker)
check_stats(daemon)
# update same shard range on local and remote, remote later
shard_ranges[-1].meta_timestamp = Timestamp.now()
shard_ranges[-1].bytes_used += 1000
broker.merge_shard_ranges(shard_ranges[-1])
remote_shard_ranges = remote_broker.get_shard_ranges(
include_deleted=True)
remote_shard_ranges[-1].meta_timestamp = Timestamp.now()
remote_shard_ranges[-1].bytes_used += 2000
remote_broker.merge_shard_ranges(remote_shard_ranges[-1])
# sanity check
remote_broker_ranges = remote_broker.get_all_shard_range_data()
self.assertShardRangesEqual(remote_shard_ranges + [own_sr],
remote_broker_ranges)
self.assertShardRangesNotEqual(shard_ranges, remote_shard_ranges)
daemon = check_replicate(remote_broker_ranges, broker, remote_broker)
check_stats(daemon)
# undelete shard range *on the remote*
deleted_ranges = [sr for sr in remote_shard_ranges if sr.deleted]
self.assertEqual([shard_ranges[0]], deleted_ranges)
deleted_ranges[0].deleted = 0
deleted_ranges[0].timestamp = Timestamp.now()
remote_broker.merge_shard_ranges(deleted_ranges[0])
# sanity check
remote_broker_ranges = remote_broker.get_all_shard_range_data()
self.assertShardRangesEqual(remote_shard_ranges + [own_sr],
remote_broker_ranges)
self.assertShardRangesNotEqual(shard_ranges, remote_shard_ranges)
daemon = check_replicate(remote_broker_ranges, broker, remote_broker)
check_stats(daemon)
# reverse replication direction and expect syncs to propagate
daemon = check_replicate(remote_broker_ranges, remote_broker, broker)
check_stats(daemon)
def test_sync_shard_ranges_error(self):
# verify that replication is not considered successful if
# merge_shard_ranges fails
put_time = Timestamp.now().internal
broker = self._get_broker('a', 'c', node_index=0)
broker.initialize(put_time, POLICIES.default.idx)
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_broker.initialize(put_time, POLICIES.default.idx)
# put an object into local broker
broker.put_object('obj', Timestamp.now().internal, 0, 'text/plain',
EMPTY_ETAG)
# get an own shard range into local broker
broker.enable_sharding(Timestamp.now())
self.assertFalse(broker.sharding_initiated())
replicate_hook = mock.MagicMock()
fake_repl_connection = attach_fake_replication_rpc(
self.rpc, errors={'merge_shard_ranges': [
FakeHTTPResponse(HTTPServerError())]},
replicate_hook=replicate_hook)
db_replicator.ReplConnection = fake_repl_connection
part, node = self._get_broker_part_node(remote_broker)
info = broker.get_replication_info()
daemon = replicator.ContainerReplicator({})
daemon.logger = debug_logger()
success = daemon._repl_to_node(node, broker, part, info)
self.assertFalse(success)
# broker only has its own shard range so expect objects to be sync'd
self.assertEqual(
['sync', 'merge_shard_ranges', 'merge_items',
'merge_syncs'],
[call[0][0] for call in replicate_hook.call_args_list])
error_lines = daemon.logger.get_lines_for_level('error')
self.assertIn('Bad response 500', error_lines[0])
self.assertFalse(error_lines[1:])
self.assertEqual(1, daemon.stats['diff'])
self.assertEqual(
1, daemon.logger.statsd_client.get_increment_counts()['diffs'])
def test_sync_shard_ranges_timeout_in_fetch(self):
        # verify that replication is not considered successful if fetching
        # shard ranges from the remote times out
put_time = Timestamp.now().internal
broker = self._get_broker('a', 'c', node_index=0)
broker.initialize(put_time, POLICIES.default.idx)
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_broker.initialize(put_time, POLICIES.default.idx)
# get an own shard range into remote broker
remote_broker.enable_sharding(Timestamp.now())
replicate_calls = []
def replicate_hook(op, *args):
replicate_calls.append(op)
if op == 'get_shard_ranges':
sleep(0.1)
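                # sleep for longer than the daemon's node_timeout (0.001s
                # below) so the get_shard_ranges fetch times out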
fake_repl_connection = attach_fake_replication_rpc(
self.rpc, replicate_hook=replicate_hook)
db_replicator.ReplConnection = fake_repl_connection
part, node = self._get_broker_part_node(remote_broker)
daemon = replicator.ContainerReplicator({'node_timeout': '0.001'})
daemon.logger = debug_logger()
with mock.patch.object(daemon.ring, 'get_part_nodes',
return_value=[node]), \
mock.patch.object(daemon, '_post_replicate_hook'):
success, _ = daemon._replicate_object(
part, broker.db_file, node['id'])
self.assertFalse(success)
        # remote has shard ranges, so the local replicator tries to fetch
        # them, but the fetch times out before any objects can be sync'd
self.assertEqual(['sync', 'get_shard_ranges'], replicate_calls)
error_lines = daemon.logger.get_lines_for_level('error')
self.assertIn('ERROR syncing /', error_lines[0])
self.assertFalse(error_lines[1:])
self.assertEqual(0, daemon.stats['diff'])
self.assertNotIn(
'diffs', daemon.logger.statsd_client.get_increment_counts())
self.assertEqual(1, daemon.stats['failure'])
self.assertEqual(
1, daemon.logger.statsd_client.get_increment_counts()['failures'])
def test_sync_shard_ranges_none_to_sync(self):
# verify that merge_shard_ranges is not sent if there are no shard
# ranges to sync
put_time = Timestamp.now().internal
broker = self._get_broker('a', 'c', node_index=0)
broker.initialize(put_time, POLICIES.default.idx)
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_broker.initialize(put_time, POLICIES.default.idx)
# put an object into local broker
broker.put_object('obj', Timestamp.now().internal, 0, 'text/plain',
EMPTY_ETAG)
replicate_hook = mock.MagicMock()
fake_repl_connection = attach_fake_replication_rpc(
self.rpc, replicate_hook=replicate_hook)
db_replicator.ReplConnection = fake_repl_connection
part, node = self._get_broker_part_node(remote_broker)
info = broker.get_replication_info()
daemon = replicator.ContainerReplicator({})
success = daemon._repl_to_node(node, broker, part, info)
self.assertTrue(success)
# NB: remote has no shard ranges, so no call to get_shard_ranges
self.assertEqual(
['sync', 'merge_items', 'merge_syncs'],
[call[0][0] for call in replicate_hook.call_args_list])
def test_sync_shard_ranges_trouble_receiving_so_none_to_sync(self):
# verify that merge_shard_ranges is not sent if local has no shard
# ranges to sync
put_time = Timestamp.now().internal
broker = self._get_broker('a', 'c', node_index=0)
broker.initialize(put_time, POLICIES.default.idx)
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_broker.initialize(put_time, POLICIES.default.idx)
# ensure the remote has at least one shard range
remote_broker.enable_sharding(Timestamp.now())
# put an object into local broker
broker.put_object('obj', Timestamp.now().internal, 0, 'text/plain',
EMPTY_ETAG)
replicate_hook = mock.MagicMock()
fake_repl_connection = attach_fake_replication_rpc(
self.rpc, errors={'get_shard_ranges': [
FakeHTTPResponse(HTTPServerError())]},
replicate_hook=replicate_hook)
db_replicator.ReplConnection = fake_repl_connection
part, node = self._get_broker_part_node(remote_broker)
info = broker.get_replication_info()
daemon = replicator.ContainerReplicator({})
success = daemon._repl_to_node(node, broker, part, info)
self.assertTrue(success)
# NB: remote had shard ranges, but there was... some sort of issue
# in getting them locally, so no call to merge_shard_ranges
self.assertEqual(
['sync', 'get_shard_ranges', 'merge_items', 'merge_syncs'],
[call[0][0] for call in replicate_hook.call_args_list])
def test_sync_shard_ranges_with_rsync(self):
broker = self._get_broker('a', 'c', node_index=0)
put_timestamp = time.time()
broker.initialize(put_timestamp, POLICIES.default.idx)
bounds = (('', 'g'), ('g', 'r'), ('r', ''))
shard_ranges = [
ShardRange('.shards_a/sr-%s' % upper, Timestamp.now(), lower,
upper, i + 1, 10 * (i + 1))
for i, (lower, upper) in enumerate(bounds)
]
# add first shard range
own_sr = broker.enable_sharding(Timestamp.now())
broker.merge_shard_ranges(shard_ranges[:1])
# "replicate"
part, node = self._get_broker_part_node(broker)
daemon = self._run_once(node)
self.assertEqual(2, daemon.stats['rsync'])
# complete rsync to all other nodes
def check_replicate(expected_ranges):
for i in range(1, 3):
remote_broker = self._get_broker('a', 'c', node_index=i)
self.assertTrue(os.path.exists(remote_broker.db_file))
self.assertShardRangesEqual(
expected_ranges,
remote_broker.get_shard_ranges(include_deleted=True,
include_own=True)
)
remote_info = remote_broker.get_info()
local_info = self._get_broker(
'a', 'c', node_index=0).get_info()
for k, v in local_info.items():
if k == 'id':
continue
if k == 'hash':
self.assertEqual(remote_info[k], '0' * 32)
continue
if k == 'object_count':
self.assertEqual(remote_info[k], 0)
continue
self.assertEqual(remote_info[k], v,
"mismatch remote %s %r != %r" % (
k, remote_info[k], v))
check_replicate([shard_ranges[0], own_sr])
# delete and add some more shard ranges
shard_ranges[0].deleted = 1
shard_ranges[0].timestamp = Timestamp.now()
for shard_range in shard_ranges:
broker.merge_shard_ranges(shard_range)
daemon = self._run_once(node)
self.assertEqual(2, daemon.stats['deferred'])
check_replicate(shard_ranges + [own_sr])
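    # The helper methods below drive a single replication pass against a fake
    # replication RPC and record both the RPC operations and any rsync calls,
    # so the tests that follow can assert on exactly which replication
    # strategy (rsync, diff/usync, or shard-range-only) was chosen.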
def check_replicate(self, from_broker, remote_node_index, repl_conf=None,
expect_success=True):
repl_conf = repl_conf or {}
repl_calls = []
rsync_calls = []
def repl_hook(op, *sync_args):
repl_calls.append((op, sync_args))
fake_repl_connection = attach_fake_replication_rpc(
self.rpc, replicate_hook=repl_hook, errors=None)
db_replicator.ReplConnection = fake_repl_connection
daemon = replicator.ContainerReplicator(
repl_conf, logger=debug_logger())
self._install_fake_rsync_file(daemon, rsync_calls)
part, nodes = self._ring.get_nodes(from_broker.account,
from_broker.container)
def find_node(node_index):
for node in nodes:
if node['index'] == node_index:
return node
else:
self.fail('Failed to find node index %s' % remote_node_index)
remote_node = find_node(remote_node_index)
info = from_broker.get_replication_info()
success = daemon._repl_to_node(remote_node, from_broker, part, info)
self.assertEqual(expect_success, success)
return daemon, repl_calls, rsync_calls
def assert_synced_shard_ranges(self, expected, synced_items):
expected.sort(key=lambda sr: (sr.lower, sr.upper))
for item in synced_items:
item.pop('record_type', None)
self.assertEqual([dict(ex) for ex in expected], synced_items)
def assert_info_synced(self, local, remote_node_index, mismatches=None):
mismatches = mismatches or []
mismatches.append('id')
remote = self._get_broker(local.account, local.container,
node_index=remote_node_index)
local_info = local.get_info()
remote_info = remote.get_info()
errors = []
for k, v in local_info.items():
if remote_info.get(k) == v:
if k in mismatches:
errors.append(
"unexpected match remote %s %r == %r" % (
k, remote_info[k], v))
continue
else:
if k not in mismatches:
errors.append(
"unexpected mismatch remote %s %r != %r" % (
k, remote_info[k], v))
if errors:
self.fail('Found sync errors:\n' + '\n'.join(errors))
def assert_shard_ranges_synced(self, local_broker, remote_broker):
self.assertShardRangesEqual(
local_broker.get_shard_ranges(include_deleted=True,
include_own=True),
remote_broker.get_shard_ranges(include_deleted=True,
include_own=True)
)
def _setup_replication_test(self, node_index):
ts_iter = make_timestamp_iter()
policy_idx = POLICIES.default.idx
put_timestamp = Timestamp.now().internal
# create "local" broker
broker = self._get_broker('a', 'c', node_index=node_index)
broker.initialize(put_timestamp, policy_idx)
objs = [{'name': 'blah%03d' % i, 'created_at': next(ts_iter).internal,
'size': i, 'content_type': 'text/plain', 'etag': 'etag%s' % i,
'deleted': 0, 'storage_policy_index': policy_idx}
for i in range(20)]
bounds = (('', 'a'), ('a', 'b'), ('b', 'c'), ('c', ''))
shard_ranges = [
ShardRange(
'.sharded_a/sr-%s' % upper, Timestamp.now(), lower, upper)
for i, (lower, upper) in enumerate(bounds)
]
return {'broker': broker,
'objects': objs,
'shard_ranges': shard_ranges}
def _merge_object(self, broker, objects, index, **kwargs):
if not isinstance(index, slice):
index = slice(index, index + 1)
objs = [dict(obj) for obj in objects[index]]
broker.merge_items(objs)
def _merge_shard_range(self, broker, shard_ranges, index, **kwargs):
broker.merge_shard_ranges(shard_ranges[index:index + 1])
def _goto_sharding_state(self, broker, epoch):
broker.enable_sharding(epoch)
self.assertTrue(broker.set_sharding_state())
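        # set_sharding_state() is expected to cut over to a fresh DB file
        # named with the epoch, while the original hash.db is retained as the
        # retired DB until sharding completes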
self.assertEqual(backend.SHARDING, broker.get_db_state())
def _goto_sharded_state(self, broker):
self.assertTrue(broker.set_sharded_state())
self.assertEqual(backend.SHARDED, broker.get_db_state())
def _assert_local_sharded_in_sync(self, local_broker, local_id):
daemon, repl_calls, rsync_calls = self.check_replicate(local_broker, 1)
self.assertEqual(['sync', 'get_shard_ranges', 'merge_shard_ranges'],
[call[0] for call in repl_calls])
self.assertEqual(1, daemon.stats['deferred'])
self.assertEqual(0, daemon.stats['rsync'])
self.assertEqual(0, daemon.stats['diff'])
self.assertFalse(rsync_calls)
# new db sync
self.assertEqual(local_id, repl_calls[0][1][2])
# ...but we still get a merge_shard_ranges for shard ranges
self.assert_synced_shard_ranges(
local_broker.get_shard_ranges(include_own=True),
repl_calls[2][1][0])
self.assertEqual(local_id, repl_calls[2][1][1])
def _check_only_shard_ranges_replicated(self, local_broker,
remote_node_index,
repl_conf,
expected_shard_ranges,
remote_has_shards=True,
expect_success=True):
# expected_shard_ranges is expected final list of sync'd ranges
daemon, repl_calls, rsync_calls = self.check_replicate(
local_broker, remote_node_index, repl_conf,
expect_success=expect_success)
        # we always expect only shard ranges to be sync'd; object
        # replication is deferred
self.assertEqual(1, daemon.stats['deferred'])
self.assertEqual(0, daemon.stats['diff'])
self.assertEqual(0, daemon.stats['rsync'])
if remote_has_shards:
exp_calls = ['sync', 'get_shard_ranges', 'merge_shard_ranges']
else:
exp_calls = ['sync', 'merge_shard_ranges']
self.assertEqual(exp_calls, [call[0] for call in repl_calls])
self.assertFalse(rsync_calls)
# sync
local_id = local_broker.get_info()['id']
self.assertEqual(local_id, repl_calls[0][1][2])
# get_shard_ranges
if remote_has_shards:
self.assertEqual((), repl_calls[1][1])
# merge_shard_ranges for sending local shard ranges
self.assertShardRangesEqual(expected_shard_ranges,
repl_calls[-1][1][0])
self.assertEqual(local_id, repl_calls[-1][1][1])
remote_broker = self._get_broker(
local_broker.account, local_broker.container, node_index=1)
self.assertNotEqual(local_id, remote_broker.get_info()['id'])
self.assert_shard_ranges_synced(remote_broker, local_broker)
def test_replication_local_unsharded_remote_missing(self):
context = self._setup_replication_test(0)
local_broker = context['broker']
local_id = local_broker.get_info()['id']
objs = context['objects']
self._merge_object(index=0, **context)
daemon, repl_calls, rsync_calls = self.check_replicate(local_broker, 1)
self.assert_info_synced(local_broker, 1)
self.assertEqual(1, daemon.stats['rsync'])
self.assertEqual(['sync', 'complete_rsync'],
[call[0] for call in repl_calls])
self.assertEqual(local_id, repl_calls[1][1][0])
self.assertEqual(os.path.basename(local_broker.db_file),
repl_calls[1][1][1])
self.assertEqual(local_broker.db_file, rsync_calls[0][0])
self.assertEqual(local_id, os.path.basename(rsync_calls[0][1]))
self.assertFalse(rsync_calls[1:])
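        # (Note: because the remote DB was missing entirely, the whole local
        # DB file is rsynced across and complete_rsync installs it; no
        # row-by-row diff/usync is needed.)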
remote_broker = self._get_broker('a', 'c', node_index=1)
self.assert_shard_ranges_synced(local_broker, remote_broker)
self.assertTrue(os.path.exists(remote_broker._db_file))
self.assertNotEqual(local_id, remote_broker.get_info()['id'])
self.assertEqual(objs[:1], remote_broker.get_objects())
def _check_replication_local_unsharded_remote_sharded(self, repl_conf):
context = self._setup_replication_test(0)
local_broker = context['broker']
local_id = local_broker.get_info()['id']
self._merge_object(index=slice(0, 6), **context)
remote_context = self._setup_replication_test(1)
self._merge_object(index=4, **remote_context)
remote_broker = remote_context['broker']
epoch = Timestamp.now()
self._goto_sharding_state(remote_broker, epoch=epoch)
remote_context['shard_ranges'][0].object_count = 101
remote_context['shard_ranges'][0].bytes_used = 1010
remote_context['shard_ranges'][0].state = ShardRange.ACTIVE
self._merge_shard_range(index=0, **remote_context)
self._merge_object(index=5, **remote_context)
self._goto_sharded_state(remote_broker)
self.assertEqual(backend.SHARDED, remote_broker.get_db_state())
self._check_only_shard_ranges_replicated(
local_broker, 1, repl_conf,
remote_broker.get_shard_ranges(include_own=True))
remote_broker = self._get_broker(
local_broker.account, local_broker.container, node_index=1)
self.assertEqual(backend.SHARDED, remote_broker.get_db_state())
self.assertFalse(os.path.exists(remote_broker._db_file))
self.assertNotEqual(local_id, remote_broker.get_info()['id'])
self.assertEqual(remote_context['objects'][5:6],
remote_broker.get_objects())
# Now that we have shard ranges, we're never considered in-sync :-/
self._check_only_shard_ranges_replicated(
local_broker, 1, repl_conf,
remote_broker.get_shard_ranges(include_own=True))
def test_replication_local_unsharded_remote_sharded(self):
self._check_replication_local_unsharded_remote_sharded({})
def test_replication_local_unsharded_remote_sharded_large_diff(self):
self._check_replication_local_unsharded_remote_sharded({'per_diff': 1})
def _check_replication_local_sharding_remote_missing(self, repl_conf):
local_context = self._setup_replication_test(0)
local_broker = local_context['broker']
self._merge_object(index=0, **local_context)
self._merge_object(index=1, **local_context)
epoch = Timestamp.now()
self._goto_sharding_state(local_broker, epoch)
self._merge_shard_range(index=0, **local_context)
self._merge_object(index=slice(2, 8), **local_context)
objs = local_context['objects']
daemon, repl_calls, rsync_calls = self.check_replicate(
local_broker, 1, repl_conf=repl_conf)
self.assertEqual(['sync', 'complete_rsync'],
[call[0] for call in repl_calls])
self.assertEqual(1, daemon.stats['rsync'])
self.assertEqual(0, daemon.stats['deferred'])
self.assertEqual(0, daemon.stats['diff'])
# fresh db is sync'd first...
fresh_id = local_broker.get_info()['id']
self.assertEqual(fresh_id, repl_calls[0][1][2])
self.assertEqual(fresh_id, repl_calls[1][1][0])
# retired db is not sync'd at all
old_broker = self.backend(
local_broker._db_file, account=local_broker.account,
container=local_broker.container, force_db_file=True)
old_id = old_broker.get_info()['id']
bad_calls = []
for call in repl_calls:
if old_id in call[1]:
bad_calls.append(
'old db id %r in %r call args %r' % (
old_id, call[0], call[1]))
if bad_calls:
self.fail('Found some bad calls:\n' + '\n'.join(bad_calls))
# complete_rsync
self.assertEqual(os.path.basename(local_broker.db_file),
repl_calls[1][1][1])
self.assertEqual(local_broker.db_file, rsync_calls[0][0])
self.assertEqual(fresh_id, os.path.basename(rsync_calls[0][1]))
self.assertFalse(rsync_calls[1:])
# TODO: make these stats better; in sharding state local broker pulls
# stats for 2 objects from old db, whereas remote thinks it's sharded
# and has an empty shard range table
self.assert_info_synced(local_broker, 1, mismatches=[
'object_count', 'bytes_used', 'db_state'])
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_id = remote_broker.get_info()['id']
self.assertNotEqual(old_id, remote_id)
self.assertNotEqual(fresh_id, remote_id)
self.assertEqual(
[remote_broker.db_file], get_db_files(remote_broker.db_file))
self.assertEqual(os.path.basename(remote_broker.db_file),
os.path.basename(local_broker.db_file))
self.assertEqual(epoch, remote_broker.db_epoch)
# remote db has only the misplaced objects
self.assertEqual(objs[2:8], remote_broker.get_objects())
self.assert_shard_ranges_synced(local_broker, remote_broker)
# replicate again, check asserts abort
self._check_only_shard_ranges_replicated(
local_broker, 1, repl_conf,
local_broker.get_shard_ranges(include_own=True))
# sanity
remote_broker = self._get_broker('a', 'c', node_index=1)
self.assertEqual(
[remote_broker.db_file], get_db_files(remote_broker.db_file))
self.assertEqual(os.path.basename(remote_broker.db_file),
os.path.basename(local_broker.db_file))
self.assertEqual(objs[2:8], remote_broker.get_objects())
self.assertEqual(epoch, remote_broker.db_epoch)
def test_replication_local_sharding_remote_missing(self):
self._check_replication_local_sharding_remote_missing({})
def test_replication_local_sharding_remote_missing_large_diff(self):
# the local shard db has large diff with respect to the old db
self._check_replication_local_sharding_remote_missing({'per_diff': 1})
def _check_replication_local_sharding_remote_unsharded(self, repl_conf):
local_context = self._setup_replication_test(0)
self._merge_object(index=slice(0, 3), **local_context)
local_broker = local_context['broker']
epoch = Timestamp.now()
self._goto_sharding_state(local_broker, epoch)
self._merge_shard_range(index=0, **local_context)
self._merge_object(index=slice(3, 11), **local_context)
remote_context = self._setup_replication_test(1)
self._merge_object(index=11, **remote_context)
self._check_only_shard_ranges_replicated(
local_broker, 1, repl_conf,
local_broker.get_shard_ranges(include_own=True),
remote_has_shards=False)
remote_broker = self._get_broker('a', 'c', node_index=1)
self.assertEqual(
[remote_broker._db_file], get_db_files(remote_broker.db_file))
self.assertEqual(remote_context['objects'][11:12],
remote_broker.get_objects())
self.assert_info_synced(
local_broker, 1,
mismatches=['db_state', 'object_count', 'bytes_used',
'status_changed_at', 'hash'])
self._check_only_shard_ranges_replicated(
local_broker, 1, repl_conf,
local_broker.get_shard_ranges(include_own=True))
def test_replication_local_sharding_remote_unsharded(self):
self._check_replication_local_sharding_remote_unsharded({})
def test_replication_local_sharding_remote_unsharded_large_diff(self):
self._check_replication_local_sharding_remote_unsharded(
{'per_diff': 1})
def _check_only_sync(self, local_broker, remote_node_index, repl_conf):
daemon, repl_calls, rsync_calls = self.check_replicate(
local_broker, remote_node_index, repl_conf,
expect_success=False)
# When talking to an old (pre-2.18.0) container server, abort
# replication when we're sharding or sharded. Wait for the
# rolling upgrade that's presumably in-progress to finish instead.
self.assertEqual(1, daemon.stats['deferred'])
self.assertEqual(0, daemon.stats['diff'])
self.assertEqual(0, daemon.stats['rsync'])
self.assertEqual(['sync'],
[call[0] for call in repl_calls])
self.assertFalse(rsync_calls)
lines = daemon.logger.get_lines_for_level('warning')
self.assertIn('unable to replicate shard ranges', lines[0])
self.assertIn('refusing to replicate objects', lines[1])
self.assertFalse(lines[2:])
# sync
local_id = local_broker.get_info()['id']
self.assertEqual(local_id, repl_calls[0][1][2])
remote_broker = self._get_broker(
local_broker.account, local_broker.container, node_index=1)
self.assertNotEqual(local_id, remote_broker.get_info()['id'])
self.assertEqual([], remote_broker.get_shard_ranges())
def _check_replication_local_sharding_remote_presharding(self, repl_conf):
local_context = self._setup_replication_test(0)
self._merge_object(index=slice(0, 3), **local_context)
local_broker = local_context['broker']
epoch = Timestamp.now()
self._goto_sharding_state(local_broker, epoch)
self._merge_shard_range(index=0, **local_context)
self._merge_object(index=slice(3, 11), **local_context)
remote_context = self._setup_replication_test(1)
self._merge_object(index=11, **remote_context)
orig_get_remote_info = \
replicator.ContainerReplicatorRpc._get_synced_replication_info
def presharding_get_remote_info(*args):
rinfo = orig_get_remote_info(*args)
del rinfo['shard_max_row']
return rinfo
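        # removing 'shard_max_row' from the sync response makes the remote
        # look like a pre-2.18.0 container server that knows nothing about
        # shard ranges, so the local replicator should refuse to replicate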
with mock.patch('swift.container.replicator.'
'ContainerReplicatorRpc._get_synced_replication_info',
presharding_get_remote_info):
self._check_only_sync(local_broker, 1, repl_conf)
remote_broker = self._get_broker('a', 'c', node_index=1)
self.assertEqual(
[remote_broker._db_file], get_db_files(remote_broker.db_file))
self.assertEqual(remote_context['objects'][11:12],
remote_broker.get_objects())
self.assert_info_synced(
local_broker, 1,
mismatches=['db_state', 'object_count', 'bytes_used',
'status_changed_at', 'hash'])
self._check_only_sync(local_broker, 1, repl_conf)
def test_replication_local_sharding_remote_presharding(self):
self._check_replication_local_sharding_remote_presharding({})
def test_replication_local_sharding_remote_presharding_large_diff(self):
self._check_replication_local_sharding_remote_presharding(
{'per_diff': 1})
def _check_replication_local_sharding_remote_sharding(self, repl_conf):
local_context = self._setup_replication_test(0)
self._merge_object(index=slice(0, 5), **local_context)
local_broker = local_context['broker']
epoch = Timestamp.now()
self._goto_sharding_state(local_broker, epoch)
self._merge_shard_range(index=0, **local_context)
self._merge_object(index=slice(5, 10), **local_context)
remote_context = self._setup_replication_test(1)
self._merge_object(index=12, **remote_context)
# take snapshot of info now before transition to sharding...
orig_remote_info = remote_context['broker'].get_info()
remote_broker = remote_context['broker']
self._goto_sharding_state(remote_broker, epoch)
self._merge_shard_range(index=0, **remote_context)
self._merge_object(index=13, **remote_context)
self._check_only_shard_ranges_replicated(
local_broker, 1, repl_conf,
remote_broker.get_shard_ranges(include_own=True))
        # in sharding state brokers only report object stats from the old
        # db, and they are different
self.assert_info_synced(
local_broker, 1, mismatches=['object_count', 'bytes_used',
'status_changed_at', 'hash'])
remote_broker = self._get_broker('a', 'c', node_index=1)
shard_db = make_db_file_path(remote_broker._db_file, epoch)
self.assertEqual([remote_broker._db_file, shard_db],
get_db_files(remote_broker.db_file))
# no local objects have been sync'd to remote shard db
self.assertEqual(remote_context['objects'][13:14],
remote_broker.get_objects())
# remote *old db* is unchanged
remote_old_broker = self.backend(
remote_broker._db_file, account=remote_broker.account,
container=remote_broker.container, force_db_file=True)
self.assertEqual(remote_context['objects'][12:13],
remote_old_broker.get_objects())
self.assertFalse(remote_old_broker.get_shard_ranges())
remote_old_info = remote_old_broker.get_info()
orig_remote_info.pop('db_state')
remote_old_info.pop('db_state')
self.assertEqual(orig_remote_info, remote_old_info)
self._check_only_shard_ranges_replicated(
local_broker, 1, repl_conf,
local_broker.get_shard_ranges(include_own=True))
def test_replication_local_sharding_remote_sharding(self):
self._check_replication_local_sharding_remote_sharding({})
def test_replication_local_sharding_remote_sharding_large_diff(self):
self._check_replication_local_sharding_remote_sharding({'per_diff': 1})
def test_replication_local_sharded_remote_missing(self):
local_context = self._setup_replication_test(0)
local_broker = local_context['broker']
epoch = Timestamp.now()
self._goto_sharding_state(local_broker, epoch)
local_context['shard_ranges'][0].object_count = 99
local_context['shard_ranges'][0].state = ShardRange.ACTIVE
self._merge_shard_range(index=0, **local_context)
self._merge_object(index=slice(0, 3), **local_context)
self._goto_sharded_state(local_broker)
objs = local_context['objects']
daemon, repl_calls, rsync_calls = self.check_replicate(local_broker, 1)
self.assertEqual(['sync', 'complete_rsync'],
[call[0] for call in repl_calls])
self.assertEqual(1, daemon.stats['rsync'])
# sync
local_id = local_broker.get_info()['id']
self.assertEqual(local_id, repl_calls[0][1][2])
# complete_rsync
self.assertEqual(local_id, repl_calls[1][1][0])
self.assertEqual(
os.path.basename(local_broker.db_file), repl_calls[1][1][1])
self.assertEqual(local_broker.db_file, rsync_calls[0][0])
self.assertEqual(local_id, os.path.basename(rsync_calls[0][1]))
self.assertFalse(rsync_calls[1:])
self.assert_info_synced(local_broker, 1)
remote_broker = self._get_broker('a', 'c', node_index=1)
remote_id = remote_broker.get_info()['id']
self.assertNotEqual(local_id, remote_id)
shard_db = make_db_file_path(remote_broker._db_file, epoch)
self.assertEqual([shard_db],
get_db_files(remote_broker.db_file))
self.assertEqual(objs[:3], remote_broker.get_objects())
self.assertEqual(local_broker.get_shard_ranges(),
remote_broker.get_shard_ranges())
# sanity check - in sync
self._assert_local_sharded_in_sync(local_broker, local_id)
remote_broker = self._get_broker('a', 'c', node_index=1)
shard_db = make_db_file_path(remote_broker._db_file, epoch)
self.assertEqual([shard_db],
get_db_files(remote_broker.db_file))
# the remote broker object_count comes from replicated shard range...
self.assertEqual(99, remote_broker.get_info()['object_count'])
# these are replicated misplaced objects...
self.assertEqual(objs[:3], remote_broker.get_objects())
self.assertEqual(local_broker.get_shard_ranges(),
remote_broker.get_shard_ranges())
def _check_replication_local_sharded_remote_unsharded(self, repl_conf):
local_context = self._setup_replication_test(0)
local_broker = local_context['broker']
epoch = Timestamp.now()
self._goto_sharding_state(local_broker, epoch)
local_context['shard_ranges'][0].object_count = 99
local_context['shard_ranges'][0].state = ShardRange.ACTIVE
self._merge_shard_range(index=0, **local_context)
self._merge_object(index=slice(0, 3), **local_context)
self._goto_sharded_state(local_broker)
remote_context = self._setup_replication_test(1)
self._merge_object(index=4, **remote_context)
self._check_only_shard_ranges_replicated(
local_broker, 1, repl_conf,
local_broker.get_shard_ranges(include_own=True),
remote_has_shards=False,
expect_success=True)
# sharded broker takes object count from shard range whereas remote
# unsharded broker takes it from object table
self.assert_info_synced(
local_broker, 1,
mismatches=['db_state', 'object_count', 'bytes_used',
'status_changed_at', 'hash'])
remote_broker = self._get_broker('a', 'c', node_index=1)
self.assertEqual([remote_broker._db_file],
get_db_files(remote_broker.db_file))
self.assertEqual(remote_context['objects'][4:5],
remote_broker.get_objects())
self._check_only_shard_ranges_replicated(
local_broker, 1, repl_conf,
local_broker.get_shard_ranges(include_own=True),
# We just sent shards, so of course remote has some
remote_has_shards=True,
expect_success=True)
remote_broker = self._get_broker('a', 'c', node_index=1)
self.assertEqual([remote_broker._db_file],
get_db_files(remote_broker.db_file))
self.assertEqual(remote_context['objects'][4:5],
remote_broker.get_objects())
def test_replication_local_sharded_remote_unsharded(self):
self._check_replication_local_sharded_remote_unsharded({})
def test_replication_local_sharded_remote_unsharded_large_diff(self):
self._check_replication_local_sharded_remote_unsharded({'per_diff': 1})
def _check_replication_local_sharded_remote_sharding(self, repl_conf):
local_context = self._setup_replication_test(0)
local_broker = local_context['broker']
epoch = Timestamp.now()
self._goto_sharding_state(local_broker, epoch=epoch)
local_context['shard_ranges'][0].object_count = 99
local_context['shard_ranges'][0].bytes_used = 999
local_context['shard_ranges'][0].state = ShardRange.ACTIVE
self._merge_shard_range(index=0, **local_context)
self._merge_object(index=slice(0, 5), **local_context)
self._goto_sharded_state(local_broker)
remote_context = self._setup_replication_test(1)
self._merge_object(index=6, **remote_context)
remote_broker = remote_context['broker']
remote_info_orig = remote_broker.get_info()
self._goto_sharding_state(remote_broker, epoch=epoch)
self._merge_shard_range(index=0, **remote_context)
self._merge_object(index=7, **remote_context)
self._check_only_shard_ranges_replicated(
local_broker, 1, repl_conf,
# remote has newer timestamp for shard range
remote_broker.get_shard_ranges(include_own=True),
expect_success=True)
# sharded broker takes object count from shard range whereas remote
# sharding broker takes it from object table
self.assert_info_synced(
local_broker, 1,
mismatches=['db_state', 'object_count', 'bytes_used',
'status_changed_at', 'hash'])
remote_broker = self._get_broker('a', 'c', node_index=1)
shard_db = make_db_file_path(remote_broker._db_file, epoch)
self.assertEqual([remote_broker._db_file, shard_db],
get_db_files(remote_broker.db_file))
# remote fresh db objects are unchanged
self.assertEqual(remote_context['objects'][7:8],
remote_broker.get_objects())
# remote old hash.db objects are unchanged
remote_old_broker = self.backend(
remote_broker._db_file, account=remote_broker.account,
container=remote_broker.container, force_db_file=True)
self.assertEqual(
remote_context['objects'][6:7],
remote_old_broker.get_objects())
remote_info = remote_old_broker.get_info()
remote_info_orig.pop('db_state')
remote_info.pop('db_state')
self.assertEqual(remote_info_orig, remote_info)
self.assertEqual(local_broker.get_shard_ranges(),
remote_broker.get_shard_ranges())
self._check_only_shard_ranges_replicated(
local_broker, 1, repl_conf,
remote_broker.get_shard_ranges(include_own=True),
expect_success=True)
def test_replication_local_sharded_remote_sharding(self):
self._check_replication_local_sharded_remote_sharding({})
def test_replication_local_sharded_remote_sharding_large_diff(self):
self._check_replication_local_sharded_remote_sharding({'per_diff': 1})
def _check_replication_local_sharded_remote_sharded(self, repl_conf):
local_context = self._setup_replication_test(0)
local_broker = local_context['broker']
epoch = Timestamp.now()
self._goto_sharding_state(local_broker, epoch)
local_context['shard_ranges'][0].object_count = 99
local_context['shard_ranges'][0].bytes_used = 999
local_context['shard_ranges'][0].state = ShardRange.ACTIVE
self._merge_shard_range(index=0, **local_context)
self._merge_object(index=slice(0, 6), **local_context)
self._goto_sharded_state(local_broker)
remote_context = self._setup_replication_test(1)
self._merge_object(index=6, **remote_context)
remote_broker = remote_context['broker']
self._goto_sharding_state(remote_broker, epoch)
remote_context['shard_ranges'][0].object_count = 101
remote_context['shard_ranges'][0].bytes_used = 1010
remote_context['shard_ranges'][0].state = ShardRange.ACTIVE
self._merge_shard_range(index=0, **remote_context)
self._merge_object(index=7, **remote_context)
self._goto_sharded_state(remote_broker)
self._check_only_shard_ranges_replicated(
local_broker, 1, repl_conf,
# remote has newer timestamp for shard range
remote_broker.get_shard_ranges(include_own=True),
expect_success=True)
self.assert_info_synced(
local_broker, 1,
mismatches=['status_changed_at', 'hash'])
remote_broker = self._get_broker('a', 'c', node_index=1)
shard_db = make_db_file_path(remote_broker._db_file, epoch)
self.assertEqual([shard_db],
get_db_files(remote_broker.db_file))
self.assertEqual(remote_context['objects'][7:8],
remote_broker.get_objects())
# remote shard range was newer than local so object count is not
# updated by sync'd shard range
self.assertEqual(
101, remote_broker.get_shard_ranges()[0].object_count)
self._check_only_shard_ranges_replicated(
local_broker, 1, repl_conf,
# remote has newer timestamp for shard range
remote_broker.get_shard_ranges(include_own=True),
expect_success=True)
def test_replication_local_sharded_remote_sharded(self):
self._check_replication_local_sharded_remote_sharded({})
def test_replication_local_sharded_remote_sharded_large_diff(self):
self._check_replication_local_sharded_remote_sharded({'per_diff': 1})
def test_replication_rsync_then_merge_aborts_before_merge_sharding(self):
# verify that rsync_then_merge aborts if remote starts sharding during
# the rsync
local_context = self._setup_replication_test(0)
local_broker = local_context['broker']
self._merge_object(index=slice(0, 3), **local_context)
remote_context = self._setup_replication_test(1)
remote_broker = remote_context['broker']
remote_broker.logger = debug_logger()
self._merge_object(index=5, **remote_context)
orig_func = replicator.ContainerReplicatorRpc.rsync_then_merge
def mock_rsync_then_merge(*args):
remote_broker.merge_shard_ranges(
ShardRange('.shards_a/cc', Timestamp.now()))
self._goto_sharding_state(remote_broker, Timestamp.now())
return orig_func(*args)
with mock.patch(
'swift.container.replicator.ContainerReplicatorRpc.'
'rsync_then_merge',
mock_rsync_then_merge):
with mock.patch(
'swift.container.backend.ContainerBroker.'
'get_items_since') as mock_get_items_since:
daemon, repl_calls, rsync_calls = self.check_replicate(
local_broker, 1, expect_success=False,
repl_conf={'per_diff': 1})
mock_get_items_since.assert_not_called()
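        # the receiving end is expected to notice that it has started
        # sharding and abort rsync_then_merge before merging any rows,
        # hence get_items_since is never called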
# No call to get_shard_ranges because remote didn't have shard ranges
# when the sync arrived
self.assertEqual(['sync', 'rsync_then_merge'],
[call[0] for call in repl_calls])
self.assertEqual(local_broker.db_file, rsync_calls[0][0])
self.assertEqual(local_broker.get_info()['id'],
os.path.basename(rsync_calls[0][1]))
self.assertFalse(rsync_calls[1:])
def test_replication_rsync_then_merge_aborts_before_merge_sharded(self):
# verify that rsync_then_merge aborts if remote completes sharding
# during the rsync
local_context = self._setup_replication_test(0)
local_broker = local_context['broker']
self._merge_object(index=slice(0, 3), **local_context)
remote_context = self._setup_replication_test(1)
remote_broker = remote_context['broker']
remote_broker.logger = debug_logger()
self._merge_object(index=5, **remote_context)
orig_func = replicator.ContainerReplicatorRpc.rsync_then_merge
def mock_rsync_then_merge(*args):
remote_broker.merge_shard_ranges(
ShardRange('.shards_a/cc', Timestamp.now()))
self._goto_sharding_state(remote_broker, Timestamp.now())
self._goto_sharded_state(remote_broker)
return orig_func(*args)
with mock.patch(
'swift.container.replicator.ContainerReplicatorRpc.'
'rsync_then_merge',
mock_rsync_then_merge):
with mock.patch(
'swift.container.backend.ContainerBroker.'
'get_items_since') as mock_get_items_since:
daemon, repl_calls, rsync_calls = self.check_replicate(
local_broker, 1, expect_success=False,
repl_conf={'per_diff': 1})
mock_get_items_since.assert_not_called()
# No call to get_shard_ranges because remote didn't have shard ranges
# when the sync arrived
self.assertEqual(['sync', 'rsync_then_merge'],
[call[0] for call in repl_calls])
self.assertEqual(local_broker.db_file, rsync_calls[0][0])
self.assertEqual(local_broker.get_info()['id'],
os.path.basename(rsync_calls[0][1]))
self.assertFalse(rsync_calls[1:])
def test_replication_rsync_then_merge_aborts_after_merge_sharding(self):
# verify that rsync_then_merge aborts if remote starts sharding during
# the merge
local_context = self._setup_replication_test(0)
local_broker = local_context['broker']
self._merge_object(index=slice(0, 3), **local_context)
remote_context = self._setup_replication_test(1)
remote_broker = remote_context['broker']
remote_broker.logger = debug_logger()
self._merge_object(index=5, **remote_context)
orig_get_items_since = backend.ContainerBroker.get_items_since
calls = []
def fake_get_items_since(broker, *args):
# remote starts sharding while rpc call is merging
if not calls:
remote_broker.merge_shard_ranges(
ShardRange('.shards_a/cc', Timestamp.now()))
self._goto_sharding_state(remote_broker, Timestamp.now())
calls.append(args)
return orig_get_items_since(broker, *args)
to_patch = 'swift.container.backend.ContainerBroker.get_items_since'
with mock.patch(to_patch, fake_get_items_since), \
mock.patch('swift.common.db_replicator.sleep'), \
mock.patch('swift.container.backend.tpool.execute',
lambda func, *args: func(*args)):
# For some reason, on py3 we start popping Timeouts
# if we let eventlet trampoline...
daemon, repl_calls, rsync_calls = self.check_replicate(
local_broker, 1, expect_success=False,
repl_conf={'per_diff': 1})
self.assertEqual(['sync', 'rsync_then_merge'],
[call[0] for call in repl_calls])
self.assertEqual(local_broker.db_file, rsync_calls[0][0])
self.assertEqual(local_broker.get_info()['id'],
os.path.basename(rsync_calls[0][1]))
self.assertFalse(rsync_calls[1:])
def test_replication_rsync_then_merge_aborts_after_merge_sharded(self):
# verify that rsync_then_merge aborts if remote completes sharding
# during the merge
local_context = self._setup_replication_test(0)
local_broker = local_context['broker']
self._merge_object(index=slice(0, 3), **local_context)
remote_context = self._setup_replication_test(1)
remote_broker = remote_context['broker']
remote_broker.logger = debug_logger()
self._merge_object(index=5, **remote_context)
orig_get_items_since = backend.ContainerBroker.get_items_since
calls = []
def fake_get_items_since(broker, *args):
            # remote completes sharding while rpc call is merging
result = orig_get_items_since(broker, *args)
if calls:
remote_broker.merge_shard_ranges(
ShardRange('.shards_a/cc', Timestamp.now()))
self._goto_sharding_state(remote_broker, Timestamp.now())
self._goto_sharded_state(remote_broker)
calls.append(args)
return result
to_patch = 'swift.container.backend.ContainerBroker.get_items_since'
with mock.patch(to_patch, fake_get_items_since), \
mock.patch('swift.common.db_replicator.sleep'), \
mock.patch('swift.container.backend.tpool.execute',
lambda func, *args: func(*args)):
# For some reason, on py3 we start popping Timeouts
# if we let eventlet trampoline...
daemon, repl_calls, rsync_calls = self.check_replicate(
local_broker, 1, expect_success=False,
repl_conf={'per_diff': 1})
self.assertEqual(['sync', 'rsync_then_merge'],
[call[0] for call in repl_calls])
self.assertEqual(local_broker.db_file, rsync_calls[0][0])
self.assertEqual(local_broker.get_info()['id'],
os.path.basename(rsync_calls[0][1]))
self.assertFalse(rsync_calls[1:])
@mock.patch('swift.common.ring.ring.Ring.get_part_nodes', return_value=[])
def test_find_local_handoff_for_part(self, mock_part_nodes):
with mock.patch(
'swift.common.db_replicator.ring.Ring',
return_value=self._ring):
daemon = replicator.ContainerReplicator({}, logger=self.logger)
# First let's assume we find a primary node
ring_node1, ring_node2, ring_node3 = daemon.ring.devs[-3:]
mock_part_nodes.return_value = [ring_node1, ring_node2]
daemon._local_device_ids = {ring_node1['id']: ring_node1,
ring_node3['id']: ring_node3}
node = daemon.find_local_handoff_for_part(0)
self.assertEqual(node['id'], ring_node1['id'])
# And if we can't find one from the primaries get *some* local device
mock_part_nodes.return_value = []
daemon._local_device_ids = {ring_node3['id']: ring_node3}
node = daemon.find_local_handoff_for_part(0)
self.assertEqual(node['id'], ring_node3['id'])
        # if there is more than one local_dev_id it'll randomly pick one, but
        # not a zero-weight device
ring_node3['weight'] = 0
selected_node_ids = set()
local_dev_ids = {dev['id']: dev for dev in daemon.ring.devs[-3:]}
daemon._local_device_ids = local_dev_ids
for _ in range(15):
node = daemon.find_local_handoff_for_part(0)
self.assertIn(node['id'], local_dev_ids)
selected_node_ids.add(node['id'])
if len(selected_node_ids) == 3:
break # unexpected
self.assertEqual(len(selected_node_ids), 2)
self.assertEqual([1, 1], [local_dev_ids[dev_id]['weight']
for dev_id in selected_node_ids])
warning_lines = self.logger.get_lines_for_level('warning')
self.assertFalse(warning_lines)
# ...unless all devices have zero-weight
ring_node3['weight'] = 0
ring_node2['weight'] = 0
selected_node_ids = set()
local_dev_ids = {dev['id']: dev for dev in daemon.ring.devs[-2:]}
daemon._local_device_ids = local_dev_ids
for _ in range(15):
self.logger.clear()
node = daemon.find_local_handoff_for_part(0)
self.assertIn(node['id'], local_dev_ids)
selected_node_ids.add(node['id'])
if len(selected_node_ids) == 2:
break # expected
self.assertEqual(len(selected_node_ids), 2)
self.assertEqual([0, 0], [local_dev_ids[dev_id]['weight']
for dev_id in selected_node_ids])
warning_lines = self.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warning_lines), warning_lines)
self.assertIn(
'Could not find a non-zero weight device for handoff partition',
warning_lines[0])
# If there are also no local_dev_ids, then we'll get the RuntimeError
daemon._local_device_ids = {}
with self.assertRaises(RuntimeError) as dev_err:
daemon.find_local_handoff_for_part(0)
expected_error_string = 'Cannot find local handoff; no local devices'
self.assertEqual(str(dev_err.exception), expected_error_string)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/container/test_replicator.py |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numbers
import shutil
from tempfile import mkdtemp
import mock
import operator
import time
import unittest
import socket
import os
import errno
import itertools
import random
import eventlet
from collections import defaultdict
from datetime import datetime
import six
from six.moves import urllib
from swift.common.storage_policy import StoragePolicy, ECStoragePolicy
from swift.common.swob import Request
from swift.container import reconciler
from swift.container.server import gen_resp_headers, ContainerController
from swift.common.direct_client import ClientException
from swift.common import swob
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import split_path, Timestamp, encode_timestamps, mkdirs
from test.debug_logger import debug_logger
from test.unit import FakeRing, fake_http_connect, patch_policies, \
DEFAULT_TEST_EC_TYPE, make_timestamp_iter
from test.unit.common.middleware import helpers
def timestamp_to_last_modified(timestamp):
return datetime.utcfromtimestamp(
float(Timestamp(timestamp))).strftime('%Y-%m-%dT%H:%M:%S.%f')
def container_resp_headers(**kwargs):
return HeaderKeyDict(gen_resp_headers(kwargs))
class FakeStoragePolicySwift(object):
def __init__(self):
self.storage_policy = defaultdict(helpers.FakeSwift)
self._mock_oldest_spi_map = {}
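    # Attribute lookups that miss on this wrapper (e.g. register() or the
    # recorded call list used in assertions) fall back to the default
    # FakeSwift stored under the policy-index key None.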
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
return getattr(self.storage_policy[None], name)
def __call__(self, env, start_response):
method = env['REQUEST_METHOD']
path = env['PATH_INFO']
_, acc, cont, obj = split_path(env['PATH_INFO'], 0, 4,
rest_with_last=True)
if not obj:
policy_index = None
else:
policy_index = self._mock_oldest_spi_map.get(cont, 0)
# allow backend policy override
if 'HTTP_X_BACKEND_STORAGE_POLICY_INDEX' in env:
policy_index = int(env['HTTP_X_BACKEND_STORAGE_POLICY_INDEX'])
try:
return self.storage_policy[policy_index].__call__(
env, start_response)
except KeyError:
pass
if method == 'PUT':
resp_class = swob.HTTPCreated
else:
resp_class = swob.HTTPNotFound
self.storage_policy[policy_index].register(
method, path, resp_class, {}, '')
return self.storage_policy[policy_index].__call__(
env, start_response)
class FakeInternalClient(reconciler.InternalClient):
def __init__(self, listings=None):
self.app = FakeStoragePolicySwift()
self.user_agent = 'fake-internal-client'
self.request_tries = 1
self.use_replication_network = True
self.parse(listings)
self.container_ring = FakeRing()
def parse(self, listings):
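        # listings maps (storage_policy_index, object_path) to a timestamp or
        # a (timestamp, content_type) tuple; build per-account/container
        # listings from it and register mock object GET/DELETE responses.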
listings = listings or {}
self.accounts = defaultdict(lambda: defaultdict(list))
for item, timestamp in listings.items():
# XXX this interface is stupid
if isinstance(timestamp, tuple):
timestamp, content_type = timestamp
else:
timestamp, content_type = timestamp, 'application/x-put'
storage_policy_index, path = item
if six.PY2 and isinstance(path, six.text_type):
path = path.encode('utf-8')
account, container_name, obj_name = split_path(
path, 0, 3, rest_with_last=True)
self.accounts[account][container_name].append(
(obj_name, storage_policy_index, timestamp, content_type))
for account_name, containers in self.accounts.items():
for con in containers:
self.accounts[account_name][con].sort(key=lambda t: t[0])
for account, containers in self.accounts.items():
account_listing_data = []
account_path = '/v1/%s' % account
for container, objects in containers.items():
container_path = account_path + '/' + container
container_listing_data = []
for entry in objects:
(obj_name, storage_policy_index,
timestamp, content_type) = entry
if storage_policy_index is None and not obj_name:
# empty container
continue
obj_path = swob.str_to_wsgi(
container_path + '/' + obj_name)
ts = Timestamp(timestamp)
headers = {'X-Timestamp': ts.normal,
'X-Backend-Timestamp': ts.internal}
# register object response
self.app.storage_policy[storage_policy_index].register(
'GET', obj_path, swob.HTTPOk, headers)
self.app.storage_policy[storage_policy_index].register(
'DELETE', obj_path, swob.HTTPNoContent, {})
# container listing entry
last_modified = timestamp_to_last_modified(timestamp)
# some tests setup mock listings using floats, some use
# strings, so normalize here
if isinstance(timestamp, numbers.Number):
timestamp = '%f' % timestamp
if six.PY2:
obj_name = obj_name.decode('utf-8')
timestamp = timestamp.decode('utf-8')
obj_data = {
'bytes': 0,
# listing data is unicode
'name': obj_name,
'last_modified': last_modified,
'hash': timestamp,
'content_type': content_type,
}
container_listing_data.append(obj_data)
container_listing_data.sort(key=operator.itemgetter('name'))
# register container listing response
container_headers = {}
container_qry_string = helpers.normalize_query_string(
'?format=json&marker=&end_marker=&prefix=')
self.app.register('GET', container_path + container_qry_string,
swob.HTTPOk, container_headers,
json.dumps(container_listing_data))
if container_listing_data:
obj_name = container_listing_data[-1]['name']
# client should quote and encode marker
end_qry_string = helpers.normalize_query_string(
'?format=json&marker=%s&end_marker=&prefix=' % (
urllib.parse.quote(obj_name.encode('utf-8'))))
self.app.register('GET', container_path + end_qry_string,
swob.HTTPOk, container_headers,
json.dumps([]))
self.app.register('DELETE', container_path,
swob.HTTPConflict, {}, '')
# simple account listing entry
container_data = {'name': container}
account_listing_data.append(container_data)
# register account response
account_listing_data.sort(key=operator.itemgetter('name'))
account_headers = {}
account_qry_string = '?format=json&marker=&end_marker=&prefix='
self.app.register('GET', account_path + account_qry_string,
swob.HTTPOk, account_headers,
json.dumps(account_listing_data))
end_qry_string = '?format=json&marker=%s&end_marker=&prefix=' % (
urllib.parse.quote(account_listing_data[-1]['name']))
self.app.register('GET', account_path + end_qry_string,
swob.HTTPOk, account_headers,
json.dumps([]))
class TestReconcilerUtils(unittest.TestCase):
def setUp(self):
self.fake_ring = FakeRing()
reconciler.direct_get_container_policy_index.reset()
self.tempdir = mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
def test_parse_raw_obj(self):
got = reconciler.parse_raw_obj({
'name': "2:/AUTH_bob/con/obj",
'hash': Timestamp(2017551.49350).internal,
'last_modified': timestamp_to_last_modified(2017551.49352),
'content_type': 'application/x-delete',
})
self.assertEqual(got['q_policy_index'], 2)
self.assertEqual(got['account'], 'AUTH_bob')
self.assertEqual(got['container'], 'con')
self.assertEqual(got['obj'], 'obj')
self.assertEqual(got['q_ts'], 2017551.49350)
self.assertEqual(got['q_record'], 2017551.49352)
self.assertEqual(got['q_op'], 'DELETE')
got = reconciler.parse_raw_obj({
'name': "1:/AUTH_bob/con/obj",
'hash': Timestamp(1234.20190).internal,
'last_modified': timestamp_to_last_modified(1234.20192),
'content_type': 'application/x-put',
})
self.assertEqual(got['q_policy_index'], 1)
self.assertEqual(got['account'], 'AUTH_bob')
self.assertEqual(got['container'], 'con')
self.assertEqual(got['obj'], 'obj')
self.assertEqual(got['q_ts'], 1234.20190)
self.assertEqual(got['q_record'], 1234.20192)
self.assertEqual(got['q_op'], 'PUT')
# the 'hash' field in object listing has the raw 'created_at' value
# which could be a composite of timestamps
timestamp_str = encode_timestamps(Timestamp(1234.20190),
Timestamp(1245.20190),
Timestamp(1256.20190),
explicit=True)
got = reconciler.parse_raw_obj({
'name': "1:/AUTH_bob/con/obj",
'hash': timestamp_str,
'last_modified': timestamp_to_last_modified(1234.20192),
'content_type': 'application/x-put',
})
self.assertEqual(got['q_policy_index'], 1)
self.assertEqual(got['account'], 'AUTH_bob')
self.assertEqual(got['container'], 'con')
self.assertEqual(got['obj'], 'obj')
self.assertEqual(got['q_ts'], 1234.20190)
self.assertEqual(got['q_record'], 1234.20192)
self.assertEqual(got['q_op'], 'PUT')
# negative test
obj_info = {
'name': "1:/AUTH_bob/con/obj",
'hash': Timestamp(1234.20190).internal,
'last_modified': timestamp_to_last_modified(1234.20192),
}
self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
obj_info['content_type'] = 'foo'
self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
obj_info['content_type'] = 'appliation/x-post'
self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
self.assertRaises(ValueError, reconciler.parse_raw_obj,
{'name': 'bogus'})
self.assertRaises(ValueError, reconciler.parse_raw_obj,
{'name': '-1:/AUTH_test/container'})
self.assertRaises(ValueError, reconciler.parse_raw_obj,
{'name': 'asdf:/AUTH_test/c/obj'})
self.assertRaises(KeyError, reconciler.parse_raw_obj,
{'name': '0:/AUTH_test/c/obj',
'content_type': 'application/x-put'})
def test_get_container_policy_index(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
]
for permutation in itertools.permutations((0, 1, 2)):
reconciler.direct_get_container_policy_index.reset()
resp_headers = [stub_resp_headers[i] for i in permutation]
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
test_values = [(info['x-storage-policy-index'],
info['x-backend-status-changed-at']) for
info in resp_headers]
self.assertEqual(oldest_spi, 0,
"oldest policy index wrong "
"for permutation %r" % test_values)
def test_get_container_policy_index_with_error(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
                status_changed_at=next(ts),
storage_policy_index=2,
),
container_resp_headers(
status_changed_at=next(ts),
storage_policy_index=1,
),
# old timestamp, but 500 should be ignored...
ClientException(
'Container Server blew up',
http_status=500, http_reason='Server Error',
http_headers=container_resp_headers(
status_changed_at=Timestamp(0).internal,
storage_policy_index=0,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_with_socket_error(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_with_too_many_errors(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
ClientException(
'Container Server blew up',
http_status=500, http_reason='Server Error',
http_headers=container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertIsNone(oldest_spi)
def test_get_container_policy_index_for_deleted(self):
mock_path = 'swift.container.reconciler.direct_head_container'
headers = container_resp_headers(
status_changed_at=Timestamp.now().internal,
storage_policy_index=1,
)
stub_resp_headers = [
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_for_recently_deleted(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_for_recently_recreated(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
# old put, no recreate
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
# recently deleted
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
),
# recently recreated
container_resp_headers(
delete_timestamp=next(ts),
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_for_recently_split_brain(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
# oldest put
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
# old recreate
container_resp_headers(
delete_timestamp=next(ts),
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
# recently put
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
@patch_policies(
[StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
StoragePolicy(2, 'two')])
def test_get_container_policy_index_for_recently_split_recreated(self):
# verify that get_container_policy_index reaches same conclusion as a
# container server that receives all requests in chronological order
ts_iter = make_timestamp_iter()
ts = [next(ts_iter) for _ in range(8)]
# make 3 container replicas
device_dirs = [os.path.join(self.tempdir, str(i)) for i in range(3)]
for device_dir in device_dirs:
mkdirs(os.path.join(device_dir, 'sda1'))
controllers = [ContainerController(
{'devices': devices,
'mount_check': 'false',
'replication_server': 'true'})
for devices in device_dirs]
# initial PUT goes to all 3 replicas
responses = []
for controller in controllers:
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts[0].internal,
'X-Backend-Storage-Policy-Index': 0,
})
responses.append(req.get_response(controller))
self.assertEqual([resp.status_int for resp in responses],
[201, 201, 201])
# DELETE to all 3 replicas
responses = []
for controller in controllers:
req = Request.blank('/sda1/p/a/c', method='DELETE', headers={
'X-Timestamp': ts[2].internal,
})
responses.append(req.get_response(controller))
self.assertEqual([resp.status_int for resp in responses],
[204, 204, 204])
# first recreate PUT, SPI=1, goes to replicas 0 and 1
responses = []
for controller in controllers[:2]:
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts[3].internal,
'X-Backend-Storage-Policy-Index': 1,
})
responses.append(req.get_response(controller))
# all ok, PUT follows DELETE
self.assertEqual([resp.status_int for resp in responses],
[201, 201])
# second recreate PUT, SPI=2, goes to replicas 0 and 2
responses = []
for controller in [controllers[0], controllers[2]]:
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts[5].internal,
'X-Backend-Storage-Policy-Index': 2,
})
responses.append(req.get_response(controller))
# note: 409 from replica 0 because PUT follows previous PUT
self.assertEqual([resp.status_int for resp in responses],
[409, 201])
# now do a HEAD on all replicas
responses = []
for controller in controllers:
req = Request.blank('/sda1/p/a/c', method='HEAD')
responses.append(req.get_response(controller))
self.assertEqual([resp.status_int for resp in responses],
[204, 204, 204])
resp_headers = [resp.headers for resp in responses]
# replica 0 should be authoritative because it received all requests
self.assertEqual(ts[3].internal, resp_headers[0]['X-Put-Timestamp'])
self.assertEqual('1',
resp_headers[0]['X-Backend-Storage-Policy-Index'])
self.assertEqual(ts[3].internal, resp_headers[1]['X-Put-Timestamp'])
self.assertEqual('1',
resp_headers[1]['X-Backend-Storage-Policy-Index'])
self.assertEqual(ts[5].internal, resp_headers[2]['X-Put-Timestamp'])
self.assertEqual('2',
resp_headers[2]['X-Backend-Storage-Policy-Index'])
# now feed the headers from each replica to
# direct_get_container_policy_index
mock_path = 'swift.container.reconciler.direct_head_container'
random.shuffle(resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
# expect the same outcome as the authoritative replica 0
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_cache(self):
now = time.time()
ts = itertools.count(int(now))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 0)
# re-mock with errors
stub_resp_headers = [
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
]
with mock.patch('time.time', new=lambda: now):
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
# still cached
self.assertEqual(oldest_spi, 0)
# propel time forward
the_future = now + 31
with mock.patch('time.time', new=lambda: the_future):
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
# expired
self.assertIsNone(oldest_spi)
def test_direct_delete_container_entry(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
x_timestamp = Timestamp.now()
headers = {'x-timestamp': x_timestamp.internal}
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
with mock.patch(mock_path, fake_hc):
reconciler.direct_delete_container_entry(
self.fake_ring, 'a', 'c', 'o', headers=headers)
self.assertEqual(len(connect_args), 3)
for args in connect_args:
self.assertEqual(args['method'], 'DELETE')
self.assertEqual(args['path'], '/a/c/o')
self.assertEqual(args['headers'].get('x-timestamp'),
headers['x-timestamp'])
def test_direct_delete_container_entry_with_errors(self):
# setup mock direct_delete
mock_path = \
'swift.container.reconciler.direct_delete_container_object'
stub_resp = [
None,
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
ClientException(
'Container Server blew up',
'10.0.0.12', 6201, 'sdj', 404, 'Not Found'
),
]
mock_direct_delete = mock.MagicMock()
mock_direct_delete.side_effect = stub_resp
with mock.patch(mock_path, mock_direct_delete), \
mock.patch('eventlet.greenpool.DEBUG', False):
rv = reconciler.direct_delete_container_entry(
self.fake_ring, 'a', 'c', 'o')
self.assertIsNone(rv)
self.assertEqual(len(mock_direct_delete.mock_calls), 3)
def test_add_to_reconciler_queue(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'DELETE')
self.assertTrue(ret)
self.assertEqual(ret, str(int(5948918.63946 // 3600 * 3600)))
self.assertEqual(len(connect_args), 3)
required_headers = ('x-content-type', 'x-etag')
for args in connect_args:
self.assertEqual(args['headers']['X-Timestamp'], '5948918.63946')
self.assertEqual(args['path'],
'/.misplaced_objects/5947200/17:/a/c/o')
self.assertEqual(args['headers']['X-Content-Type'],
'application/x-delete')
for header in required_headers:
self.assertTrue(header in args['headers'],
'%r was missing request headers %r' % (
header, args['headers']))
def test_add_to_reconciler_queue_force(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
now = time.time()
with mock.patch(mock_path, fake_hc), \
mock.patch('swift.container.reconciler.time.time',
lambda: now):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'PUT',
force=True)
self.assertTrue(ret)
self.assertEqual(ret, str(int(5948918.63946 // 3600 * 3600)))
self.assertEqual(len(connect_args), 3)
required_headers = ('x-size', 'x-content-type')
for args in connect_args:
self.assertEqual(args['headers']['X-Timestamp'],
Timestamp(now).internal)
self.assertEqual(args['headers']['X-Etag'], '5948918.63946')
self.assertEqual(args['path'],
'/.misplaced_objects/5947200/17:/a/c/o')
for header in required_headers:
self.assertTrue(header in args['headers'],
'%r was missing request headers %r' % (
header, args['headers']))
def test_add_to_reconciler_queue_fails(self):
mock_path = 'swift.common.direct_client.http_connect'
fake_connects = [fake_http_connect(200),
fake_http_connect(200, raise_timeout_exc=True),
fake_http_connect(507)]
def fake_hc(*a, **kw):
return fake_connects.pop()(*a, **kw)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'PUT')
self.assertFalse(ret)
def test_add_to_reconciler_queue_socket_error(self):
mock_path = 'swift.common.direct_client.http_connect'
exc = socket.error(errno.ECONNREFUSED,
os.strerror(errno.ECONNREFUSED))
fake_connects = [fake_http_connect(200),
fake_http_connect(200, raise_timeout_exc=True),
fake_http_connect(500, raise_exc=exc)]
def fake_hc(*a, **kw):
return fake_connects.pop()(*a, **kw)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'DELETE')
self.assertFalse(ret)
def listing_qs(marker):
return helpers.normalize_query_string(
"?format=json&marker=%s&end_marker=&prefix=" %
urllib.parse.quote(marker.encode('utf-8')))
@patch_policies(
[StoragePolicy(0, 'zero', is_default=True),
ECStoragePolicy(1, 'one', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=6, ec_nparity=2), ],
fake_ring_args=[{}, {'replicas': 8}])
class TestReconciler(unittest.TestCase):
maxDiff = None
def setUp(self):
self.logger = debug_logger()
conf = {}
self.swift = FakeInternalClient()
self.reconciler = reconciler.ContainerReconciler(
conf, logger=self.logger, swift=self.swift)
self.start_interval = int(time.time() // 3600 * 3600)
self.current_container_path = '/v1/.misplaced_objects/%d' % (
self.start_interval) + listing_qs('')
def test_concurrency_config(self):
conf = {}
r = reconciler.ContainerReconciler(conf, self.logger, self.swift)
self.assertEqual(r.concurrency, 1)
conf = {'concurrency': '10'}
r = reconciler.ContainerReconciler(conf, self.logger, self.swift)
self.assertEqual(r.concurrency, 10)
conf = {'concurrency': 48}
r = reconciler.ContainerReconciler(conf, self.logger, self.swift)
self.assertEqual(r.concurrency, 48)
conf = {'concurrency': 0}
self.assertRaises(ValueError, reconciler.ContainerReconciler,
conf, self.logger, self.swift)
conf = {'concurrency': '-1'}
self.assertRaises(ValueError, reconciler.ContainerReconciler,
conf, self.logger, self.swift)
def test_processes_config(self):
conf = {}
r = reconciler.ContainerReconciler(conf, self.logger, self.swift)
self.assertEqual(r.process, 0)
self.assertEqual(r.processes, 0)
conf = {'processes': '1'}
r = reconciler.ContainerReconciler(conf, self.logger, self.swift)
self.assertEqual(r.process, 0)
self.assertEqual(r.processes, 1)
conf = {'processes': 10, 'process': '9'}
r = reconciler.ContainerReconciler(conf, self.logger, self.swift)
self.assertEqual(r.process, 9)
self.assertEqual(r.processes, 10)
conf = {'processes': -1}
self.assertRaises(ValueError, reconciler.ContainerReconciler,
conf, self.logger, self.swift)
conf = {'process': -1}
self.assertRaises(ValueError, reconciler.ContainerReconciler,
conf, self.logger, self.swift)
conf = {'processes': 9, 'process': 9}
self.assertRaises(ValueError, reconciler.ContainerReconciler,
conf, self.logger, self.swift)
def test_init_internal_client_log_name(self):
def _do_test_init_ic_log_name(conf, exp_internal_client_log_name):
with mock.patch(
'swift.container.reconciler.InternalClient') \
as mock_ic:
reconciler.ContainerReconciler(conf)
mock_ic.assert_called_once_with(
'/etc/swift/container-reconciler.conf',
'Swift Container Reconciler', 3,
global_conf={'log_name': exp_internal_client_log_name},
use_replication_network=True)
_do_test_init_ic_log_name({}, 'container-reconciler-ic')
_do_test_init_ic_log_name({'log_name': 'my-container-reconciler'},
'my-container-reconciler-ic')
def _mock_listing(self, objects):
self.swift.parse(objects)
self.fake_swift = self.reconciler.swift.app
def _mock_oldest_spi(self, container_oldest_spi_map):
self.fake_swift._mock_oldest_spi_map = container_oldest_spi_map
def _run_once(self):
"""
Helper method to run the reconciler once with appropriate direct-client
mocks in place.
Returns the list of direct-deleted container entries in the format
[(acc1, con1, obj1), ...]
"""
def mock_oldest_spi(ring, account, container_name):
return self.fake_swift._mock_oldest_spi_map.get(container_name, 0)
items = {
'direct_get_container_policy_index': mock_oldest_spi,
'direct_delete_container_entry': mock.DEFAULT,
}
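        # advance a fake clock from the top of the current hour so the
        # reconciler lists the expected .misplaced_objects queue container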
mock_time_iter = itertools.count(self.start_interval)
with mock.patch.multiple(reconciler, **items) as mocks:
self.mock_delete_container_entry = \
mocks['direct_delete_container_entry']
with mock.patch('time.time', lambda: next(mock_time_iter)):
self.reconciler.run_once()
return [c[1][1:4] for c in
mocks['direct_delete_container_entry'].mock_calls]
def test_no_concurrency(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o2"): 3724.23456,
(1, "/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o2"): 3724.23456,
})
        order_received = []
def fake_reconcile_object(account, container, obj, q_policy_index,
q_ts, q_op, path, **kwargs):
            order_received.append(obj)
return True
self.reconciler._reconcile_object = fake_reconcile_object
self.assertEqual(self.reconciler.concurrency, 1) # sanity
deleted_container_entries = self._run_once()
        self.assertEqual(order_received, ['o1', 'o2'])
        # process in order received
self.assertEqual(deleted_container_entries, [
('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1'),
('.misplaced_objects', '3600', '1:/AUTH_bob/c/o2'),
])
def test_concurrency(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o2"): 3724.23456,
(1, "/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o2"): 3724.23456,
})
        order_received = []
def fake_reconcile_object(account, container, obj, q_policy_index,
q_ts, q_op, path, **kwargs):
            order_received.append(obj)
if obj == 'o1':
# o1 takes longer than o2 for some reason
for i in range(10):
eventlet.sleep(0.0)
return True
self.reconciler._reconcile_object = fake_reconcile_object
self.reconciler.concurrency = 2
deleted_container_entries = self._run_once()
        self.assertEqual(order_received, ['o1', 'o2'])
# ... and so we finish o2 first
self.assertEqual(deleted_container_entries, [
('.misplaced_objects', '3600', '1:/AUTH_bob/c/o2'),
('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1'),
])
def test_multi_process_should_process(self):
def mkqi(a, c, o):
"make queue item"
return {
'account': a,
'container': c,
'obj': o,
}
queue = [
mkqi('a', 'c', 'o1'),
mkqi('a', 'c', 'o2'),
mkqi('a', 'c', 'o3'),
mkqi('a', 'c', 'o4'),
]
def map_should_process(process, processes):
self.reconciler.process = process
self.reconciler.processes = processes
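            # Pin the hash path prefix/suffix so the hash-based assignment of
            # queue items to processes is deterministic for the checks below.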
with mock.patch('swift.common.utils.HASH_PATH_SUFFIX',
b'endcap'), \
mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''):
return [self.reconciler.should_process(q_item)
for q_item in queue]
def check_process(process, processes, expected):
should_process = map_should_process(process, processes)
try:
self.assertEqual(should_process, expected)
except AssertionError as e:
self.fail('unexpected items processed for %s/%s\n%s' % (
process, processes, e))
check_process(0, 0, [True] * 4)
check_process(0, 1, [True] * 4)
check_process(0, 2, [False, True, False, False])
check_process(1, 2, [True, False, True, True])
check_process(0, 4, [False, True, False, False])
check_process(1, 4, [True, False, False, False])
check_process(2, 4, [False] * 4) # lazy
check_process(3, 4, [False, False, True, True])
queue = [mkqi('a%s' % i, 'c%s' % i, 'o%s' % i) for i in range(1000)]
items_handled = [0] * 1000
for process in range(100):
should_process = map_should_process(process, 100)
for i, handled in enumerate(should_process):
if handled:
items_handled[i] += 1
self.assertEqual([1] * 1000, items_handled)
def test_invalid_queue_name(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/bogus"): 3618.84187,
})
deleted_container_entries = self._run_once()
# we try to find something useful
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('bogus'))])
# but only get the bogus record
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
# and just leave it on the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertFalse(deleted_container_entries)
def test_invalid_queue_name_marches_onward(self):
# there's something useful there on the queue
self._mock_listing({
(None, "/.misplaced_objects/3600/00000bogus"): 3600.0000,
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1}) # already in the right spot!
deleted_container_entries = self._run_once()
# we get all the queue entries we can
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and one is garbage
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
# but the other is workable
self.assertEqual(self.reconciler.stats['noop_object'], 1)
# so pop the queue for that one
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_queue_name_with_policy_index_delimiter_in_name(self):
q_path = '.misplaced_objects/3600'
obj_path = "AUTH_bob/c:sneaky/o1:sneaky"
# there's something useful there on the queue
self._mock_listing({
(None, "/%s/1:/%s" % (q_path, obj_path)): 3618.84187,
(1, '/%s' % obj_path): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we find the misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/%s' % obj_path))])
# move it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/%s' % obj_path),
('DELETE', '/v1/%s' % obj_path)])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/%s' % obj_path),
('PUT', '/v1/%s' % obj_path)])
# clean up the source
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
# and pop the queue for that one
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries, [(
'.misplaced_objects', '3600', '1:/%s' % obj_path)])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_unable_to_direct_get_oldest_storage_policy(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
})
# the reconciler gets "None" if we can't quorum the container
self._mock_oldest_spi({'c': None})
deleted_container_entries = self._run_once()
# we look for misplaced objects
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but can't really say where to go looking
self.assertEqual(self.reconciler.stats['unavailable_container'], 1)
# we don't clean up anything
self.assertEqual(self.reconciler.stats['cleanup_object'], 0)
# and we definitely should not pop_queue
self.assertFalse(deleted_container_entries)
self.assertEqual(self.reconciler.stats['retry'], 1)
@patch_policies(
[StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
ECStoragePolicy(2, 'two', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=6, ec_nparity=2)],
fake_ring_args=[
{'next_part_power': 1}, {}, {'next_part_power': 1}])
def test_can_reconcile_policy(self):
for policy_index, expected in ((0, False), (1, True), (2, False),
(3, False), ('apple', False),
(None, False)):
self.assertEqual(
self.reconciler.can_reconcile_policy(policy_index), expected)
@patch_policies(
[StoragePolicy(0, 'zero', is_default=True),
ECStoragePolicy(1, 'one', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=6, ec_nparity=2), ],
fake_ring_args=[{'next_part_power': 1}, {}])
def test_fail_to_move_if_ppi(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# skipped sending because policy_index 0 is in the middle of a PPI
self.assertFalse(deleted_container_entries)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
self.assertEqual(self.reconciler.stats['ppi_skip'], 1)
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'),
('PUT', '/v1/AUTH_bob/c/o1')])
put_headers = self.fake_swift.storage_policy[0].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2))
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1))
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_the_other_direction(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/0:/AUTH_bob/c/o1"): 3618.84187,
(0, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('0:/AUTH_bob/c/o1'))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[1].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '0:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_with_unicode_and_spaces(self):
# the "name" in listings and the unicode string passed to all
# functions where we call them with (account, container, obj)
obj_name = u"AUTH_bob/c \u062a/o1 \u062a"
# anytime we talk about a call made to swift for a path
if six.PY2:
obj_path = obj_name.encode('utf-8')
else:
obj_path = obj_name.encode('utf-8').decode('latin-1')
# this mock expects unquoted unicode because it handles container
# listings as well as paths
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/%s" % obj_name): 3618.84187,
(1, "/%s" % obj_name): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
# listing_qs encodes and quotes - so give it name
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/%s' % obj_name))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
# these calls are to the real path
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/%s' % obj_path), # 2
('DELETE', '/v1/%s' % obj_path)]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/%s' % obj_path), # 1
('PUT', '/v1/%s' % obj_path)]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
self.assertEqual(
delete_headers.get('X-Backend-Storage-Policy-Index'), '1')
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
# this mock received the name, it's encoded down in buffered_http
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/%s' % obj_name)])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_delete(self):
q_ts = time.time()
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): (
Timestamp(q_ts).internal, 'application/x-delete'),
# object exists in "correct" storage policy - slightly older
(0, "/AUTH_bob/c/o1"): Timestamp(q_ts - 1).internal,
})
self._mock_oldest_spi({'c': 0})
# the tombstone exists in the enqueued storage policy
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPNotFound,
{'X-Backend-Timestamp': Timestamp(q_ts).internal})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# delete it
self.assertEqual(self.reconciler.stats['delete_attempt'], 1)
self.assertEqual(self.reconciler.stats['delete_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
reconcile_headers = self.fake_swift.storage_policy[0].headers[1]
# we DELETE the object in the right place with q_ts + offset 2
self.assertEqual(reconcile_headers.get('X-Timestamp'),
Timestamp(q_ts, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(q_ts, offset=1))
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_enqueued_for_the_correct_dest_noop(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1}) # already in the right spot!
deleted_container_entries = self._run_once()
# nothing to see here
self.assertEqual(self.reconciler.stats['noop_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# so we just pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_src_object_newer_than_queue_entry(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3600.123456,
(1, '/AUTH_bob/c/o1'): 3600.234567, # slightly newer
})
self._mock_oldest_spi({'c': 0}) # destination
# turn the crank
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# proceed with the move
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# .. with source timestamp + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3600.234567, offset=2))
# src object is cleaned up
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# ... with q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3600.123456, offset=1))
# and queue is popped
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_src_object_older_than_queue_entry(self):
# should be some sort of retry case
q_ts = time.time()
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
(1, '/AUTH_bob/c/o1'): q_ts - 1, # slightly older
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_src_object_unavailable_with_slightly_newer_tombstone(self):
# should be some sort of retry case
q_ts = float(Timestamp.now())
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
})
self._mock_oldest_spi({'c': 0})
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPNotFound,
{'X-Backend-Timestamp': Timestamp(q_ts, offset=2).internal})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_src_object_unavailable_server_error(self):
# should be some sort of retry case
q_ts = float(Timestamp.now())
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
})
self._mock_oldest_spi({'c': 0})
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_fails_preflight(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3600.123456,
(1, '/AUTH_bob/c/o1'): 3600.123457, # slightly newer
})
self._mock_oldest_spi({'c': 0}) # destination
# make the HEAD blow up
self.fake_swift.storage_policy[0].register(
'HEAD', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
# turn the crank
deleted_container_entries = self._run_once()
# we did some listings...
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# ...but we can't even tell whether anything's misplaced or not
self.assertEqual(self.reconciler.stats['misplaced_object'], 0)
self.assertEqual(self.reconciler.stats['unavailable_destination'], 1)
# so we don't try to do any sort of move or cleanup
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
# and we'll have to try again later
self.assertEqual(self.reconciler.stats['retry'], 1)
self.assertEqual(self.fake_swift.storage_policy[1].calls, [])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
def test_object_move_fails_cleanup(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3600.123456,
(1, '/AUTH_bob/c/o1'): 3600.123457, # slightly newer
})
self._mock_oldest_spi({'c': 0}) # destination
# make the DELETE blow up
self.fake_swift.storage_policy[1].register(
'DELETE', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
# turn the crank
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# proceed with the move
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# .. with source timestamp + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3600.123457, offset=2))
# we try to cleanup
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
# ... with q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3600.12346, offset=1))
# but cleanup fails!
self.assertEqual(self.reconciler.stats['cleanup_failed'], 1)
# so the queue is not popped
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
# and we'll have to retry
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_src_object_is_forever_gone(self):
# oh boy, hate to be here - this is an oldy
q_ts = self.start_interval - self.reconciler.reclaim_age - 1
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): q_ts,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but it's gone :\
self.assertEqual(self.reconciler.stats['lost_source'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# gah, look, even if it was out there somewhere - we've been at this
# two weeks and haven't found it. We can't just keep looking forever,
# so... we're done
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
# dunno if this is helpful, but FWIW we don't throw tombstones?
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
self.assertEqual(self.reconciler.stats['success'], 1) # lol
def test_object_move_dest_already_moved(self):
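        # an object with the same timestamp already exists in the destination
        # policy, so we skip the copy and only clean up the misplaced source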
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3679.2019,
(1, "/AUTH_bob/c/o1"): 3679.2019,
(0, "/AUTH_bob/c/o1"): 3679.2019,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we look for misplaced objects
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but we found it already in the right place!
self.assertEqual(self.reconciler.stats['found_object'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# so no attempt to read the source is made, but we do cleanup
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[0]
# rather we just clean up the dark matter
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3679.2019, offset=1))
# and wipe our hands of it
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_dest_object_newer_than_queue_entry(self):
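        # the destination already holds a newer copy, so no copy is attempted;
        # we just clean up the misplaced source and pop the queue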
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3679.2019,
(1, "/AUTH_bob/c/o1"): 3679.2019,
(0, "/AUTH_bob/c/o1"): 3679.2019 + 1, # slightly newer
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we look for misplaced objects...
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but we found it already in the right place!
self.assertEqual(self.reconciler.stats['found_object'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
        # so no attempt to read is made, but we do cleanup
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[0]
# rather we just clean up the dark matter
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3679.2019, offset=1))
        # and since we cleaned up the old object, this counts as done
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_dest_object_older_than_queue_entry(self):
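        # the destination copy is older than the queue entry, so the source
        # object overwrites it before the misplaced copy is cleaned up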
self._mock_listing({
(None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.38393,
(1, "/AUTH_bob/c/o1"): 36123.38393,
(0, "/AUTH_bob/c/o1"): 36123.38393 - 1, # slightly older
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
('GET', '/v1/.misplaced_objects/36000' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and since our version is *newer*, we overwrite
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# ... with a q_ts + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(36123.38393, offset=2))
# then clean the dark matter
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# ... with a q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(36123.38393, offset=1))
# and pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '36000', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_put_fails(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.383925,
(1, "/AUTH_bob/c/o1"): 36123.383925,
})
self._mock_oldest_spi({'c': 0})
# make the put to dest fail!
self.fake_swift.storage_policy[0].register(
'PUT', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
# turn the crank
deleted_container_entries = self._run_once()
# we find a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
('GET', '/v1/.misplaced_objects/36000' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and try to move it, but it fails
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')]) # 2
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# ...with q_ts + offset 2 (20-microseconds)
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(36123.383925, offset=2))
# but it failed
self.assertEqual(self.reconciler.stats['copy_success'], 0)
self.assertEqual(self.reconciler.stats['copy_failed'], 1)
# ... so we don't clean up the source
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# and we don't pop the queue
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['unhandled_errors'], 0)
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_put_blows_up_crazy_town(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.383925,
(1, "/AUTH_bob/c/o1"): 36123.383925,
})
self._mock_oldest_spi({'c': 0})
# make the put to dest blow up crazy town
def blow_up(*args, **kwargs):
raise Exception('kaboom!')
self.fake_swift.storage_policy[0].register(
'PUT', '/v1/AUTH_bob/c/o1', blow_up, {})
# turn the crank
deleted_container_entries = self._run_once()
# we find a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
('GET', '/v1/.misplaced_objects/36000' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and attempt to move it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')]) # 2
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# ...with q_ts + offset 2 (20-microseconds)
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(36123.383925, offset=2))
# but it blows up hard
self.assertEqual(self.reconciler.stats['unhandled_error'], 1)
# so we don't cleanup
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# and we don't pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_no_such_object_no_tombstone_recent(self):
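        # source object is gone with no tombstone, but the queue entry is
        # recent - leave it queued and retry later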
q_ts = float(Timestamp.now())
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_jeb/c/o1" % q_path): q_ts
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('1:/AUTH_jeb/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_jeb/c/o1')],
)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_jeb/c/o1')],
)
# the queue entry is recent enough that there could easily be
# tombstones on offline nodes or something, so we'll just leave it
# here and try again later
self.assertEqual(deleted_container_entries, [])
def test_object_move_no_such_object_no_tombstone_ancient(self):
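        # source object is gone with no tombstone and the queue entry is
        # older than reclaim_age - give up and pop the entry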
queue_ts = float(Timestamp.now()) - \
self.reconciler.reclaim_age * 1.1
container = str(int(queue_ts // 3600 * 3600))
self._mock_listing({
(
None, "/.misplaced_objects/%s/1:/AUTH_jeb/c/o1" % container
): queue_ts
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container)),
('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('1:/AUTH_jeb/c/o1'))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_jeb/c/o1')],
)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_jeb/c/o1')],
)
# the queue entry is old enough that the tombstones, if any, have
# probably been reaped, so we'll just give up
self.assertEqual(
deleted_container_entries,
[('.misplaced_objects', container, '1:/AUTH_jeb/c/o1')])
def test_delete_old_empty_queue_containers(self):
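        # queue containers past reclaim_age are deleted once they're empty;
        # the one that still holds a (bogus) row is left in place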
ts = time.time() - self.reconciler.reclaim_age * 1.1
container = str(int(ts // 3600 * 3600))
older_ts = ts - 3600
older_container = str(int(older_ts // 3600 * 3600))
self._mock_listing({
(None, "/.misplaced_objects/%s/" % container): 0,
(None, "/.misplaced_objects/%s/something" % older_container): 0,
})
deleted_container_entries = self._run_once()
self.assertEqual(deleted_container_entries, [])
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container)),
('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
('DELETE', '/v1/.misplaced_objects/%s' % container),
('GET', '/v1/.misplaced_objects/%s' % older_container +
listing_qs('')),
('GET', '/v1/.misplaced_objects/%s' % older_container +
listing_qs('something'))])
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
def test_iter_over_old_containers_in_reverse(self):
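        # queue containers are visited newest-first; those past reclaim_age
        # are listed and then deleted as they turn up empty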
step = reconciler.MISPLACED_OBJECTS_CONTAINER_DIVISOR
now = self.start_interval
containers = []
for i in range(10):
container_ts = int(now - step * i)
container_name = str(container_ts // 3600 * 3600)
containers.append(container_name)
# add some old containers too
now -= self.reconciler.reclaim_age
old_containers = []
for i in range(10):
container_ts = int(now - step * i)
container_name = str(container_ts // 3600 * 3600)
old_containers.append(container_name)
containers.sort()
old_containers.sort()
all_containers = old_containers + containers
self._mock_listing(dict((
(None, "/.misplaced_objects/%s/" % container), 0
) for container in all_containers))
deleted_container_entries = self._run_once()
self.assertEqual(deleted_container_entries, [])
last_container = all_containers[-1]
account_listing_calls = [
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(last_container)),
]
new_container_calls = [
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('')) for container in reversed(containers)
        ][1:]  # current_container gets skipped the second time around...
old_container_listings = [
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('')) for container in reversed(old_containers)
]
old_container_deletes = [
('DELETE', '/v1/.misplaced_objects/%s' % container)
for container in reversed(old_containers)
]
old_container_calls = list(itertools.chain(*zip(
old_container_listings, old_container_deletes)))
self.assertEqual(self.fake_swift.calls,
[('GET', self.current_container_path)] +
account_listing_calls + new_container_calls +
old_container_calls)
def test_error_in_iter_containers(self):
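        # a 503 from the account listing gets logged as an error and leaves
        # the stats empty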
self._mock_listing({})
# make the listing return an error
self.fake_swift.storage_policy[None].register(
'GET', '/v1/.misplaced_objects' + listing_qs(''),
swob.HTTPServiceUnavailable, {})
self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs(''))])
self.assertEqual(self.reconciler.stats, {})
errors = self.reconciler.logger.get_lines_for_level('error')
self.assertEqual(errors, [
'Error listing containers in account '
'.misplaced_objects (Unexpected response: '
'503 Service Unavailable)'])
def test_unhandled_exception_in_reconcile(self):
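        # an unexpected exception from the account listing is caught and
        # logged rather than killing the run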
self._mock_listing({})
# make the listing blow up
def blow_up(*args, **kwargs):
raise Exception('kaboom!')
self.fake_swift.storage_policy[None].register(
'GET', '/v1/.misplaced_objects' + listing_qs(''),
blow_up, {})
self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs(''))])
self.assertEqual(self.reconciler.stats, {})
errors = self.reconciler.logger.get_lines_for_level('error')
self.assertEqual(errors,
['Unhandled Exception trying to reconcile: '])
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/container/test_reconciler.py |
# Copyright (c) 2022 NVIDIA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift.common import registry, utils
import mock
import unittest
class TestSwiftInfo(unittest.TestCase):
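    """Tests for register_swift_info() and get_swift_info()."""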
def setUp(self):
patcher = mock.patch.object(registry, '_swift_info', dict())
patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch.object(registry, '_swift_admin_info', dict())
patcher.start()
self.addCleanup(patcher.stop)
def test_register_swift_info(self):
registry.register_swift_info(foo='bar')
registry.register_swift_info(lorem='ipsum')
registry.register_swift_info('cap1', cap1_foo='cap1_bar')
registry.register_swift_info('cap1', cap1_lorem='cap1_ipsum')
self.assertTrue('swift' in registry._swift_info)
self.assertTrue('foo' in registry._swift_info['swift'])
self.assertEqual(registry._swift_info['swift']['foo'], 'bar')
self.assertTrue('lorem' in registry._swift_info['swift'])
self.assertEqual(registry._swift_info['swift']['lorem'], 'ipsum')
self.assertTrue('cap1' in registry._swift_info)
self.assertTrue('cap1_foo' in registry._swift_info['cap1'])
self.assertEqual(registry._swift_info['cap1']['cap1_foo'], 'cap1_bar')
self.assertTrue('cap1_lorem' in registry._swift_info['cap1'])
self.assertEqual(registry._swift_info['cap1']['cap1_lorem'],
'cap1_ipsum')
self.assertRaises(ValueError,
registry.register_swift_info, 'admin', foo='bar')
self.assertRaises(ValueError,
registry.register_swift_info, 'disallowed_sections',
disallowed_sections=None)
registry.register_swift_info('goodkey', foo='5.6')
self.assertRaises(ValueError,
registry.register_swift_info, 'bad.key', foo='5.6')
data = {'bad.key': '5.6'}
self.assertRaises(ValueError,
registry.register_swift_info, 'goodkey', **data)
def test_get_swift_info(self):
registry._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
registry._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = registry.get_swift_info()
self.assertNotIn('admin', info)
self.assertIn('swift', info)
self.assertIn('foo', info['swift'])
self.assertEqual(registry._swift_info['swift']['foo'], 'bar')
self.assertIn('cap1', info)
self.assertIn('cap1_foo', info['cap1'])
self.assertEqual(registry._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_info_with_disallowed_sections(self):
registry._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
registry._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = registry.get_swift_info(disallowed_sections=['cap1', 'cap3'])
self.assertNotIn('admin', info)
self.assertIn('swift', info)
self.assertIn('foo', info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertNotIn('cap1', info)
self.assertIn('cap2', info)
self.assertIn('cap2_foo', info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertNotIn('cap3', info)
def test_register_swift_admin_info(self):
registry.register_swift_info(admin=True, admin_foo='admin_bar')
registry.register_swift_info(admin=True, admin_lorem='admin_ipsum')
registry.register_swift_info('cap1', admin=True, ac1_foo='ac1_bar')
registry.register_swift_info('cap1', admin=True, ac1_lorem='ac1_ipsum')
self.assertIn('swift', registry._swift_admin_info)
self.assertIn('admin_foo', registry._swift_admin_info['swift'])
self.assertEqual(
registry._swift_admin_info['swift']['admin_foo'], 'admin_bar')
self.assertIn('admin_lorem', registry._swift_admin_info['swift'])
self.assertEqual(
registry._swift_admin_info['swift']['admin_lorem'], 'admin_ipsum')
self.assertIn('cap1', registry._swift_admin_info)
self.assertIn('ac1_foo', registry._swift_admin_info['cap1'])
self.assertEqual(
registry._swift_admin_info['cap1']['ac1_foo'], 'ac1_bar')
self.assertIn('ac1_lorem', registry._swift_admin_info['cap1'])
self.assertEqual(
registry._swift_admin_info['cap1']['ac1_lorem'], 'ac1_ipsum')
self.assertNotIn('swift', registry._swift_info)
self.assertNotIn('cap1', registry._swift_info)
def test_get_swift_admin_info(self):
registry._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
registry._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = registry.get_swift_info(admin=True)
self.assertIn('admin', info)
self.assertIn('admin_cap1', info['admin'])
self.assertIn('ac1_foo', info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertIn('swift', info)
self.assertIn('foo', info['swift'])
self.assertEqual(registry._swift_info['swift']['foo'], 'bar')
self.assertIn('cap1', info)
self.assertIn('cap1_foo', info['cap1'])
self.assertEqual(registry._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_admin_info_with_disallowed_sections(self):
registry._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
registry._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = registry.get_swift_info(
admin=True, disallowed_sections=['cap1', 'cap3'])
self.assertIn('admin', info)
self.assertIn('admin_cap1', info['admin'])
self.assertIn('ac1_foo', info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertIn('disallowed_sections', info['admin'])
self.assertIn('cap1', info['admin']['disallowed_sections'])
self.assertNotIn('cap2', info['admin']['disallowed_sections'])
self.assertIn('cap3', info['admin']['disallowed_sections'])
self.assertIn('swift', info)
self.assertIn('foo', info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertNotIn('cap1', info)
self.assertIn('cap2', info)
self.assertIn('cap2_foo', info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertNotIn('cap3', info)
def test_get_swift_admin_info_with_disallowed_sub_sections(self):
registry._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap2_foo': 'cap2_bar'},
'cap4': {'a': {'b': {'c': 'c'},
'b.c': 'b.c'}}}
registry._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = registry.get_swift_info(
admin=True, disallowed_sections=['cap1.cap1_foo', 'cap3',
'cap4.a.b.c'])
self.assertNotIn('cap3', info)
self.assertEqual(info['cap1']['cap1_moo'], 'cap1_baa')
self.assertNotIn('cap1_foo', info['cap1'])
self.assertNotIn('c', info['cap4']['a']['b'])
self.assertEqual(info['cap4']['a']['b.c'], 'b.c')
def test_get_swift_info_with_unmatched_disallowed_sections(self):
cap1 = {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'}
registry._swift_info = {'swift': {'foo': 'bar'},
'cap1': cap1}
# expect no exceptions
info = registry.get_swift_info(
disallowed_sections=['cap2.cap1_foo', 'cap1.no_match',
'cap1.cap1_foo.no_match.no_match'])
self.assertEqual(info['cap1'], cap1)
def test_register_swift_info_import_from_utils(self):
# verify that the functions are available to import from utils
utils.register_swift_info(foo='bar')
self.assertTrue('swift' in registry._swift_info)
self.assertTrue('foo' in registry._swift_info['swift'])
self.assertEqual(registry._swift_info['swift']['foo'], 'bar')
self.assertEqual(registry.get_swift_info(admin=True),
utils.get_swift_info(admin=True))
class TestSensitiveRegistry(unittest.TestCase):
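    """Tests for registering and looking up sensitive headers and params."""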
def setUp(self):
patcher = mock.patch.object(registry, '_sensitive_headers', set())
patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch.object(registry, '_sensitive_params', set())
patcher.start()
self.addCleanup(patcher.stop)
def test_register_sensitive_header(self):
self.assertFalse(registry._sensitive_headers)
registry.register_sensitive_header('Some-Header')
expected_headers = {'some-header'}
self.assertEqual(expected_headers, registry._sensitive_headers)
expected_headers.add("new-header")
registry.register_sensitive_header("New-Header")
self.assertEqual(expected_headers, registry._sensitive_headers)
for header_not_str in (1, None, 1.1):
with self.assertRaises(TypeError):
registry.register_sensitive_header(header_not_str)
self.assertEqual(expected_headers, registry._sensitive_headers)
with self.assertRaises(UnicodeError):
registry.register_sensitive_header('\xe2\x98\x83')
self.assertEqual(expected_headers, registry._sensitive_headers)
def test_register_sensitive_param(self):
self.assertFalse(registry._sensitive_params)
registry.register_sensitive_param('some_param')
expected_params = {'some_param'}
self.assertEqual(expected_params, registry._sensitive_params)
expected_params.add("another")
registry.register_sensitive_param("another")
self.assertEqual(expected_params, registry._sensitive_params)
for param_not_str in (1, None, 1.1):
with self.assertRaises(TypeError):
registry.register_sensitive_param(param_not_str)
self.assertEqual(expected_params, registry._sensitive_params)
with self.assertRaises(UnicodeError):
registry.register_sensitive_param('\xe2\x98\x83')
self.assertEqual(expected_params, registry._sensitive_params)
def test_get_sensitive_headers(self):
self.assertFalse(registry.get_sensitive_headers())
registry.register_sensitive_header('Header1')
self.assertEqual(registry.get_sensitive_headers(), {'header1'})
self.assertEqual(registry.get_sensitive_headers(),
registry._sensitive_headers)
registry.register_sensitive_header('Header2')
self.assertEqual(registry.get_sensitive_headers(),
{'header1', 'header2'})
self.assertEqual(registry.get_sensitive_headers(),
registry._sensitive_headers)
def test_get_sensitive_params(self):
self.assertFalse(registry.get_sensitive_params())
registry.register_sensitive_param('Param1')
self.assertEqual(registry.get_sensitive_params(), {'Param1'})
self.assertEqual(registry.get_sensitive_params(),
registry._sensitive_params)
registry.register_sensitive_param('param')
self.assertEqual(registry.get_sensitive_params(),
{'Param1', 'param'})
self.assertEqual(registry.get_sensitive_params(),
registry._sensitive_params)
| swift-master | test/unit/common/test_registry.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.utils"""
from __future__ import print_function
import hashlib
import itertools
from test import annotate_failure
from test.debug_logger import debug_logger
from test.unit import temptree, make_timestamp_iter, with_tempdir, \
mock_timestamp_now, FakeIterable
import contextlib
import errno
import eventlet
import eventlet.debug
import eventlet.event
import eventlet.patcher
import functools
import grp
import logging
import os
import mock
import posix
import pwd
import random
import re
import socket
import string
import sys
import json
import math
import inspect
import warnings
import six
from six import StringIO
from six.moves.queue import Queue, Empty
from six.moves import http_client
from six.moves import range
from textwrap import dedent
import tempfile
import time
import unittest
import fcntl
import shutil
from getpass import getuser
from io import BytesIO
from shutil import rmtree
from functools import partial
from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp
from mock import MagicMock, patch
from six.moves.configparser import NoSectionError, NoOptionError
from uuid import uuid4
from swift.common.exceptions import Timeout, MessageTimeout, \
ConnectionTimeout, LockTimeout, ReplicationLockTimeout, \
MimeInvalid
from swift.common import utils
from swift.common.utils import set_swift_dir, md5, ShardRangeList, \
SwiftLogFormatter
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.storage_policy import POLICIES, reload_storage_policies
from swift.common.swob import Request, Response
from test.unit import requires_o_tmpfile_support_in_tmp, \
quiet_eventlet_exceptions
if six.PY2:
import eventlet.green.httplib as green_http_client
else:
import eventlet.green.http.client as green_http_client
threading = eventlet.patcher.original('threading')
class MockOs(object):
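    """
    Fake replacement for the os module: selected functions can be set up to
    silently pass, record their call arguments, or raise OSError; anything
    not overridden falls through to the real os module.
    """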
def __init__(self, pass_funcs=None, called_funcs=None, raise_funcs=None):
if pass_funcs is None:
pass_funcs = []
if called_funcs is None:
called_funcs = []
if raise_funcs is None:
raise_funcs = []
self.closed_fds = []
for func in pass_funcs:
setattr(self, func, self.pass_func)
self.called_funcs = {}
for func in called_funcs:
c_func = partial(self.called_func, func)
setattr(self, func, c_func)
for func in raise_funcs:
r_func = partial(self.raise_func, func)
setattr(self, func, r_func)
def pass_func(self, *args, **kwargs):
pass
setgroups = chdir = setsid = setgid = setuid = umask = pass_func
def called_func(self, name, *args, **kwargs):
self.called_funcs[name] = args
def raise_func(self, name, *args, **kwargs):
self.called_funcs[name] = args
raise OSError()
def dup2(self, source, target):
self.closed_fds.append(target)
def geteuid(self):
'''Pretend we are running as root.'''
return 0
def __getattr__(self, name):
# I only over-ride portions of the os module
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(os, name)
class MockUdpSocket(object):
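    """Fake UDP socket that records sendto() calls or raises a given errno."""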
def __init__(self, sendto_errno=None):
self.sent = []
self.sendto_errno = sendto_errno
def sendto(self, data, target):
if self.sendto_errno:
raise socket.error(self.sendto_errno,
'test errno %s' % self.sendto_errno)
self.sent.append((data, target))
def close(self):
pass
class MockSys(object):
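    """Fake sys module whose stdin/stdout/stderr are temporary files."""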
def __init__(self):
self.stdin = TemporaryFile('w')
self.stdout = TemporaryFile('r')
self.stderr = TemporaryFile('r')
self.__stderr__ = self.stderr
self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()]
def reset_loggers():
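    """Remove any handlers installed by get_logger() and reset its state."""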
if hasattr(utils.get_logger, 'handler4logger'):
for logger, handler in utils.get_logger.handler4logger.items():
logger.removeHandler(handler)
delattr(utils.get_logger, 'handler4logger')
if hasattr(utils.get_logger, 'console_handler4logger'):
for logger, h in utils.get_logger.console_handler4logger.items():
logger.removeHandler(h)
delattr(utils.get_logger, 'console_handler4logger')
# Reset the LogAdapter class thread local state. Use get_logger() here
# to fetch a LogAdapter instance because the items from
# get_logger.handler4logger above are the underlying logger instances,
# not the LogAdapter.
utils.get_logger(None).thread_locals = (None, None)
def reset_logger_state(f):
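    """Decorator to reset get_logger() state around the wrapped test."""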
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
reset_loggers()
try:
return f(self, *args, **kwargs)
finally:
reset_loggers()
return wrapper
class TestUTC(unittest.TestCase):
def test_tzname(self):
self.assertEqual(utils.UTC.tzname(None), 'UTC')
class TestUtils(unittest.TestCase):
"""Tests for swift.common.utils """
def setUp(self):
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b'startcap'
self.md5_test_data = "Openstack forever".encode('utf-8')
try:
self.md5_digest = hashlib.md5(self.md5_test_data).hexdigest()
self.fips_enabled = False
except ValueError:
self.md5_digest = '0d6dc3c588ae71a04ce9a6beebbbba06'
self.fips_enabled = True
def test_monkey_patch(self):
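        # utils.monkey_patch() must leave logging's module-level lock as a
        # working RLock that really blocks a competing thread until released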
def take_and_release(lock):
try:
lock.acquire()
finally:
lock.release()
def do_test():
res = 0
try:
# this module imports eventlet original threading, so re-import
# locally...
import threading
import traceback
logging_lock_before = logging._lock
my_lock_before = threading.RLock()
self.assertIsInstance(logging_lock_before,
type(my_lock_before))
utils.monkey_patch()
logging_lock_after = logging._lock
my_lock_after = threading.RLock()
self.assertIsInstance(logging_lock_after,
type(my_lock_after))
self.assertTrue(logging_lock_after.acquire())
thread = threading.Thread(target=take_and_release,
args=(logging_lock_after,))
thread.start()
self.assertTrue(thread.isAlive())
# we should timeout while the thread is still blocking on lock
eventlet.sleep()
thread.join(timeout=0.1)
self.assertTrue(thread.isAlive())
logging._lock.release()
thread.join(timeout=0.1)
self.assertFalse(thread.isAlive())
except AssertionError:
traceback.print_exc()
res = 1
finally:
os._exit(res)
pid = os.fork()
if pid == 0:
# run the test in an isolated environment to avoid monkey patching
# in this one
do_test()
else:
child_pid, errcode = os.waitpid(pid, 0)
self.assertEqual(0, os.WEXITSTATUS(errcode),
'Forked do_test failed')
def test_get_zero_indexed_base_string(self):
self.assertEqual(utils.get_zero_indexed_base_string('something', 0),
'something')
self.assertEqual(utils.get_zero_indexed_base_string('something', None),
'something')
self.assertEqual(utils.get_zero_indexed_base_string('something', 1),
'something-1')
self.assertRaises(ValueError, utils.get_zero_indexed_base_string,
'something', 'not_integer')
@with_tempdir
def test_lock_path(self, tmpdir):
# 2 locks with limit=1 must fail
success = False
with utils.lock_path(tmpdir, 0.1):
with self.assertRaises(LockTimeout):
with utils.lock_path(tmpdir, 0.1):
success = True
self.assertFalse(success)
# 2 locks with limit=2 must succeed
success = False
with utils.lock_path(tmpdir, 0.1, limit=2):
try:
with utils.lock_path(tmpdir, 0.1, limit=2):
success = True
except LockTimeout as exc:
self.fail('Unexpected exception %s' % exc)
self.assertTrue(success)
# 3 locks with limit=2 must fail
success = False
with utils.lock_path(tmpdir, 0.1, limit=2):
with utils.lock_path(tmpdir, 0.1, limit=2):
with self.assertRaises(LockTimeout):
with utils.lock_path(tmpdir, 0.1):
success = True
self.assertFalse(success)
@with_tempdir
def test_lock_path_invalid_limit(self, tmpdir):
success = False
with self.assertRaises(ValueError):
with utils.lock_path(tmpdir, 0.1, limit=0):
success = True
self.assertFalse(success)
with self.assertRaises(ValueError):
with utils.lock_path(tmpdir, 0.1, limit=-1):
success = True
self.assertFalse(success)
with self.assertRaises(TypeError):
with utils.lock_path(tmpdir, 0.1, limit='1'):
success = True
self.assertFalse(success)
with self.assertRaises(TypeError):
with utils.lock_path(tmpdir, 0.1, limit=1.1):
success = True
self.assertFalse(success)
@with_tempdir
def test_lock_path_num_sleeps(self, tmpdir):
num_short_calls = [0]
exception_raised = [False]
def my_sleep(to_sleep):
if to_sleep == 0.01:
num_short_calls[0] += 1
else:
raise Exception('sleep time changed: %s' % to_sleep)
try:
with mock.patch('swift.common.utils.sleep', my_sleep):
with utils.lock_path(tmpdir):
with utils.lock_path(tmpdir):
pass
except Exception as e:
exception_raised[0] = True
self.assertTrue('sleep time changed' in str(e))
self.assertEqual(num_short_calls[0], 11)
self.assertTrue(exception_raised[0])
@with_tempdir
def test_lock_path_class(self, tmpdir):
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is not None)
self.assertTrue(exc2 is None)
self.assertTrue(not success)
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is None)
self.assertTrue(exc2 is not None)
self.assertTrue(not success)
@with_tempdir
def test_lock_path_name(self, tmpdir):
# With default limit (1), can't take the same named lock twice
success = False
with utils.lock_path(tmpdir, 0.1, name='foo'):
with self.assertRaises(LockTimeout):
with utils.lock_path(tmpdir, 0.1, name='foo'):
success = True
self.assertFalse(success)
# With default limit (1), can take two differently named locks
success = False
with utils.lock_path(tmpdir, 0.1, name='foo'):
with utils.lock_path(tmpdir, 0.1, name='bar'):
success = True
self.assertTrue(success)
# With default limit (1), can take a named lock and the default lock
success = False
with utils.lock_path(tmpdir, 0.1, name='foo'):
with utils.lock_path(tmpdir, 0.1):
success = True
self.assertTrue(success)
def test_normalize_timestamp(self):
# Test swift.common.utils.normalize_timestamp
self.assertEqual(utils.normalize_timestamp('1253327593.48174'),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp(1253327593.48174),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp('1253327593.48'),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp(1253327593.48),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp('253327593.48'),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp(253327593.48),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp('1253327593'),
"1253327593.00000")
self.assertEqual(utils.normalize_timestamp(1253327593),
"1253327593.00000")
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_normalize_delete_at_timestamp(self):
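        # delete-at values are clamped to the range 0..9999999999 and
        # rendered as a ten digit integer string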
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593.67890),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593.67890'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593.67890),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593.67890'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593.67890),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593'),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593.67890'),
'9999999999')
with self.assertRaises(TypeError):
utils.normalize_delete_at_timestamp(None)
with self.assertRaises(ValueError):
utils.normalize_delete_at_timestamp('')
with self.assertRaises(ValueError):
utils.normalize_delete_at_timestamp('abc')
def test_normalize_delete_at_timestamp_high_precision(self):
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593, True),
'1253327593.00000')
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593.67890, True),
'1253327593.67890')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593', True),
'1253327593.00000')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593.67890', True),
'1253327593.67890')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593, True),
'0000000000.00000')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593.67890, True),
'0000000000.00000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593', True),
'0000000000.00000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593.67890', True),
'0000000000.00000')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593, True),
'9999999999.99999')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593.67890, True),
'9999999999.99999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593', True),
'9999999999.99999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593.67890', True),
'9999999999.99999')
with self.assertRaises(TypeError):
utils.normalize_delete_at_timestamp(None, True)
with self.assertRaises(ValueError):
utils.normalize_delete_at_timestamp('', True)
with self.assertRaises(ValueError):
utils.normalize_delete_at_timestamp('abc', True)
def test_last_modified_date_to_timestamp(self):
expectations = {
'1970-01-01T00:00:00.000000': 0.0,
'2014-02-28T23:22:36.698390': 1393629756.698390,
'2011-03-19T04:03:00.604554': 1300507380.604554,
}
for last_modified, ts in expectations.items():
real = utils.last_modified_date_to_timestamp(last_modified)
self.assertEqual(real, ts, "failed for %s" % last_modified)
def test_last_modified_date_to_timestamp_when_system_not_UTC(self):
try:
old_tz = os.environ.get('TZ')
# Western Argentina Summer Time. Found in glibc manual; this
# timezone always has a non-zero offset from UTC, so this test is
# always meaningful.
os.environ['TZ'] = 'WART4WARST,J1/0,J365/25'
self.assertEqual(utils.last_modified_date_to_timestamp(
'1970-01-01T00:00:00.000000'),
0.0)
finally:
if old_tz is not None:
os.environ['TZ'] = old_tz
else:
os.environ.pop('TZ')
def test_drain_and_close(self):
utils.drain_and_close([])
utils.drain_and_close(iter([]))
drained = [False]
def gen():
yield 'x'
yield 'y'
drained[0] = True
utils.drain_and_close(gen())
self.assertTrue(drained[0])
utils.drain_and_close(Response(status=200, body=b'Some body'))
drained = [False]
utils.drain_and_close(Response(status=200, app_iter=gen()))
self.assertTrue(drained[0])
def test_backwards(self):
# Test swift.common.utils.backward
# The lines are designed so that the function would encounter
# all of the boundary conditions and typical conditions.
# Block boundaries are marked with '<>' characters
blocksize = 25
lines = [b'123456789x12345678><123456789\n', # block larger than rest
b'123456789x123>\n', # block ends just before \n character
b'123423456789\n',
b'123456789x\n', # block ends at the end of line
b'<123456789x123456789x123\n',
b'<6789x123\n', # block ends at the beginning of the line
b'6789x1234\n',
b'1234><234\n', # block ends typically in the middle of line
b'123456789x123456789\n']
with TemporaryFile() as f:
for line in lines:
f.write(line)
count = len(lines) - 1
for line in utils.backward(f, blocksize):
self.assertEqual(line, lines[count].split(b'\n')[0])
count -= 1
# Empty file case
with TemporaryFile('r') as f:
self.assertEqual([], list(utils.backward(f)))
def test_mkdirs(self):
testdir_base = mkdtemp()
testroot = os.path.join(testdir_base, 'mkdirs')
try:
self.assertTrue(not os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
rmtree(testroot, ignore_errors=1)
testdir = os.path.join(testroot, 'one/two/three')
self.assertTrue(not os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
rmtree(testroot, ignore_errors=1)
open(testroot, 'wb').close()
self.assertTrue(not os.path.exists(testdir))
self.assertRaises(OSError, utils.mkdirs, testdir)
os.unlink(testroot)
finally:
rmtree(testdir_base)
def test_split_path(self):
# Test swift.common.utils.split_account_path
self.assertRaises(ValueError, utils.split_path, '')
self.assertRaises(ValueError, utils.split_path, '/')
self.assertRaises(ValueError, utils.split_path, '//')
self.assertEqual(utils.split_path('/a'), ['a'])
self.assertRaises(ValueError, utils.split_path, '//a')
self.assertEqual(utils.split_path('/a/'), ['a'])
self.assertRaises(ValueError, utils.split_path, '/a/c')
self.assertRaises(ValueError, utils.split_path, '//c')
self.assertRaises(ValueError, utils.split_path, '/a/c/')
self.assertRaises(ValueError, utils.split_path, '/a//')
self.assertRaises(ValueError, utils.split_path, '/a', 2)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True)
self.assertEqual(utils.split_path('/a/c', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3)
self.assertEqual(utils.split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEqual(utils.split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, utils.split_path, '/a', 5, 4)
self.assertEqual(utils.split_path('/a/c/', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
utils.split_path('o\nn e', 2)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
try:
utils.split_path('o\nn e', 2, 3, True)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
def test_validate_device_partition(self):
# Test swift.common.utils.validate_device_partition
utils.validate_device_partition('foo', 'bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '', '')
self.assertRaises(ValueError,
utils.validate_device_partition, '', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo/bar', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', 'foo/bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '.', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, '..', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '.')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '..')
try:
utils.validate_device_partition('o\nn e', 'foo')
except ValueError as err:
self.assertEqual(str(err), 'Invalid device: o%0An%20e')
try:
utils.validate_device_partition('foo', 'o\nn e')
except ValueError as err:
self.assertEqual(str(err), 'Invalid partition: o%0An%20e')
def test_NullLogger(self):
# Test swift.common.utils.NullLogger
sio = StringIO()
nl = utils.NullLogger()
nl.write('test')
self.assertEqual(sio.getvalue(), '')
def test_LoggerFileObject(self):
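        # writes via a LoggerFileObject hooked up as stdout/stderr should
        # land in the logger with STDOUT:/STDERR: prefixes; reads raise
        # IOError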
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sio = StringIO()
handler = logging.StreamHandler(sio)
logger = logging.getLogger()
logger.addHandler(handler)
lfo_stdout = utils.LoggerFileObject(logger)
lfo_stderr = utils.LoggerFileObject(logger, 'STDERR')
print('test1')
self.assertEqual(sio.getvalue(), '')
sys.stdout = lfo_stdout
print('test2')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\n')
sys.stderr = lfo_stderr
print('test4', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
sys.stdout = orig_stdout
print('test5')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
print('test6', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
sys.stderr = orig_stderr
print('test8')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
lfo_stdout.writelines(['a', 'b', 'c'])
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\n')
lfo_stdout.close()
lfo_stderr.close()
lfo_stdout.write('d')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
lfo_stdout.flush()
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
for lfo in (lfo_stdout, lfo_stderr):
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
self.assertRaises(IOError, lfo.read)
self.assertRaises(IOError, lfo.read, 1024)
self.assertRaises(IOError, lfo.readline)
self.assertRaises(IOError, lfo.readline, 1024)
lfo.tell()
def test_LoggerFileObject_recursion(self):
crashy_calls = [0]
class CrashyLogger(logging.Handler):
def emit(self, record):
crashy_calls[0] += 1
try:
# Pretend to be trying to send to syslog, but syslogd is
# dead. We need the raise here to set sys.exc_info.
raise socket.error(errno.ENOTCONN, "This is an ex-syslog")
except socket.error:
self.handleError(record)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = CrashyLogger()
logger.addHandler(handler)
# Set up some real file descriptors for stdio. If you run
# nosetests with "-s", you already have real files there, but
# otherwise they're StringIO objects.
#
# In any case, since capture_stdio() closes sys.stdin and friends,
# we'd want to set up some sacrificial files so as to not goof up
# the testrunner.
new_stdin = open(os.devnull, 'r+b')
new_stdout = open(os.devnull, 'w+b')
new_stderr = open(os.devnull, 'w+b')
with contextlib.closing(new_stdin), contextlib.closing(new_stdout), \
contextlib.closing(new_stderr):
# logging.raiseExceptions is set to False in test/__init__.py, but
# is True in Swift daemons, and the error doesn't manifest without
# it.
with mock.patch('sys.stdin', new_stdin), \
mock.patch('sys.stdout', new_stdout), \
mock.patch('sys.stderr', new_stderr), \
mock.patch.object(logging, 'raiseExceptions', True):
# Note: since stdio is hooked up to /dev/null in here, using
# pdb is basically impossible. Sorry about that.
utils.capture_stdio(logger)
logger.info("I like ham")
self.assertGreater(crashy_calls[0], 1)
logger.removeHandler(handler)
def test_parse_options(self):
# Get a file that is definitely on disk
with NamedTemporaryFile() as f:
conf_file = f.name
conf, options = utils.parse_options(test_args=[conf_file])
self.assertEqual(conf, conf_file)
# assert defaults
self.assertEqual(options['verbose'], False)
self.assertNotIn('once', options)
# assert verbose as option
conf, options = utils.parse_options(test_args=[conf_file, '-v'])
self.assertEqual(options['verbose'], True)
# check once option
conf, options = utils.parse_options(test_args=[conf_file],
once=True)
self.assertEqual(options['once'], False)
test_args = [conf_file, '--once']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['once'], True)
# check options as arg parsing
test_args = [conf_file, 'once', 'plugin_name', 'verbose']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['verbose'], True)
self.assertEqual(options['once'], True)
self.assertEqual(options['extra_args'], ['plugin_name'])
def test_parse_options_errors(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
stdo = StringIO()
stde = StringIO()
utils.sys.stdout = stdo
utils.sys.stderr = stde
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[])
self.assertTrue('missing config' in stdo.getvalue())
# verify conf file must exist, context manager will delete temp file
with NamedTemporaryFile() as f:
conf_file = f.name
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[conf_file])
self.assertTrue('unable to locate' in stdo.getvalue())
# reset stdio
utils.sys.stdout = orig_stdout
utils.sys.stderr = orig_stderr
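    # dump_recon_cache merges the submitted dict into the cache file:
    # nested values update in place, existing keys persist ("sticky"),
    # and submitting an empty dict for a key erases it, as shown below.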
def test_dump_recon_cache(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
logger = utils.get_logger(None, 'server', log_route='server')
try:
submit_dict = {'key0': 99,
'key1': {'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(submit_dict, file_dict)
# Use a nested entry
submit_dict = {'key0': 101,
'key1': {'key2': {'value1': 1, 'value2': 2}}}
expect_dict = {'key0': 101,
'key1': {'key2': {'value1': 1, 'value2': 2},
'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# nested dict items are not sticky
submit_dict = {'key1': {'key2': {'value3': 3}}}
expect_dict = {'key0': 101,
'key1': {'key2': {'value3': 3},
'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# cached entries are sticky
submit_dict = {}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# nested dicts can be erased...
submit_dict = {'key1': {'key2': {}}}
expect_dict = {'key0': 101,
'key1': {'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# ... and erasure is idempotent
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# top level dicts can be erased...
submit_dict = {'key1': {}}
expect_dict = {'key0': 101}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
# ... and erasure is idempotent
utils.dump_recon_cache(submit_dict, testcache_file, logger)
with open(testcache_file) as fd:
file_dict = json.loads(fd.readline())
self.assertEqual(expect_dict, file_dict)
finally:
rmtree(testdir_base)
def test_dump_recon_cache_set_owner(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
logger = utils.get_logger(None, 'server', log_route='server')
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
_ret = lambda: None
_ret.pw_uid = 100
_mock_getpwnam = MagicMock(return_value=_ret)
_mock_chown = mock.Mock()
with patch('os.chown', _mock_chown), \
patch('pwd.getpwnam', _mock_getpwnam):
utils.dump_recon_cache(submit_dict, testcache_file,
logger, set_owner="swift")
_mock_getpwnam.assert_called_once_with("swift")
self.assertEqual(_mock_chown.call_args[0][1], 100)
finally:
rmtree(testdir_base)
def test_dump_recon_cache_permission_denied(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
class MockLogger(object):
def __init__(self):
self._excs = []
def exception(self, message):
_junk, exc, _junk = sys.exc_info()
self._excs.append(exc)
logger = MockLogger()
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
with mock.patch(
'swift.common.utils.NamedTemporaryFile',
side_effect=IOError(13, 'Permission Denied')):
utils.dump_recon_cache(submit_dict, testcache_file, logger)
self.assertIsInstance(logger._excs[0], IOError)
finally:
rmtree(testdir_base)
def test_load_recon_cache(self):
stub_data = {'test': 'foo'}
with NamedTemporaryFile() as f:
f.write(json.dumps(stub_data).encode("utf-8"))
f.flush()
self.assertEqual(stub_data, utils.load_recon_cache(f.name))
# missing files are treated as empty
self.assertFalse(os.path.exists(f.name)) # sanity
self.assertEqual({}, utils.load_recon_cache(f.name))
# Corrupt files are treated as empty. We could crash and make an
# operator fix the corrupt file, but they'll "fix" it with "rm -f
# /var/cache/swift/*.recon", so let's just do it for them.
with NamedTemporaryFile() as f:
f.write(b"{not [valid (json")
f.flush()
self.assertEqual({}, utils.load_recon_cache(f.name))
def test_get_logger(self):
sio = StringIO()
logger = logging.getLogger('server')
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server', log_route='server')
logger.warning('test1')
self.assertEqual(sio.getvalue(), 'test1\n')
logger.debug('test2')
self.assertEqual(sio.getvalue(), 'test1\n')
logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
log_route='server')
logger.debug('test3')
self.assertEqual(sio.getvalue(), 'test1\ntest3\n')
        # This doesn't really test that the log facility is truly being used
        # all the way to syslog, but it exercises the code.
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
log_route='server')
logger.warning('test4')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure debug doesn't log by default
logger.debug('test5')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
        # make sure the notice level logs by default
logger.notice('test6')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\ntest6\n')
def test_get_logger_name_and_route(self):
@contextlib.contextmanager
def add_log_handler(logger):
# install a handler to capture log messages formatted as per swift
sio = StringIO()
handler = logging.StreamHandler(sio)
handler.setFormatter(SwiftLogFormatter(
fmt="%(server)s: %(message)s", max_line_length=20)
)
logger.logger.addHandler(handler)
yield sio
logger.logger.removeHandler(handler)
logger = utils.get_logger({}, name='name', log_route='route')
# log_route becomes the LogAdapter.name and logging.Logger.name
self.assertEqual('route', logger.name)
self.assertEqual('route', logger.logger.name)
# name becomes the LogAdapter.server!
self.assertEqual('name', logger.server)
# LogAdapter.server is used when formatting a log message
with add_log_handler(logger) as sio:
logger.info('testing')
self.assertEqual('name: testing\n', sio.getvalue())
logger = utils.get_logger({'log_name': 'conf-name'}, name='name',
log_route='route')
self.assertEqual('route', logger.name)
self.assertEqual('name', logger.server)
with add_log_handler(logger) as sio:
logger.info('testing')
self.assertEqual('name: testing\n', sio.getvalue())
logger = utils.get_logger({'log_name': 'conf-name'}, log_route='route')
self.assertEqual('route', logger.name)
self.assertEqual('conf-name', logger.server)
with add_log_handler(logger) as sio:
logger.info('testing')
self.assertEqual('conf-name: testing\n', sio.getvalue())
logger = utils.get_logger({'log_name': 'conf-name'})
self.assertEqual('conf-name', logger.name)
self.assertEqual('conf-name', logger.server)
with add_log_handler(logger) as sio:
logger.info('testing')
self.assertEqual('conf-name: testing\n', sio.getvalue())
logger = utils.get_logger({})
self.assertEqual('swift', logger.name)
self.assertEqual('swift', logger.server)
with add_log_handler(logger) as sio:
logger.info('testing')
self.assertEqual('swift: testing\n', sio.getvalue())
logger = utils.get_logger({}, log_route='route')
self.assertEqual('route', logger.name)
self.assertEqual('swift', logger.server)
with add_log_handler(logger) as sio:
logger.info('testing')
self.assertEqual('swift: testing\n', sio.getvalue())
# same log_route, different names...
logger1 = utils.get_logger({'log_statsd_host': '1.2.3.4'},
name='name1', log_route='route')
logger2 = utils.get_logger({'log_statsd_host': '1.2.3.5'},
name='name2', log_route='route')
self.assertEqual('route', logger1.name)
self.assertEqual('route', logger1.logger.name)
self.assertEqual('name1', logger1.server)
# oh dear, the statsd client on the common logging.Logger instance got
# mutated when logger2 was created
self.assertEqual('name2.', logger1.logger.statsd_client._prefix)
self.assertEqual('route', logger2.name)
self.assertEqual('route', logger2.logger.name)
self.assertEqual('name2', logger2.server)
self.assertEqual('name2.', logger2.logger.statsd_client._prefix)
self.assertIs(logger2.logger, logger1.logger)
with add_log_handler(logger1) as sio:
logger1.info('testing')
self.assertEqual('name1: testing\n', sio.getvalue())
with add_log_handler(logger2) as sio:
logger2.info('testing')
self.assertEqual('name2: testing\n', sio.getvalue())
# different log_route, different names...
logger1 = utils.get_logger({'log_statsd_host': '1.2.3.4'},
name='name1', log_route='route1')
logger2 = utils.get_logger({'log_statsd_host': '1.2.3.5'},
name='name2', log_route='route2')
self.assertEqual('route1', logger1.name)
self.assertEqual('route1', logger1.logger.name)
self.assertEqual('name1', logger1.server)
self.assertEqual('name1.', logger1.logger.statsd_client._prefix)
self.assertEqual('route2', logger2.name)
self.assertEqual('route2', logger2.logger.name)
self.assertEqual('name2', logger2.server)
self.assertEqual('name2.', logger2.logger.statsd_client._prefix)
self.assertIsNot(logger2.logger, logger1.logger)
with add_log_handler(logger1) as sio:
logger1.info('testing')
self.assertEqual('name1: testing\n', sio.getvalue())
with add_log_handler(logger2) as sio:
logger2.info('testing')
self.assertEqual('name2: testing\n', sio.getvalue())
@with_tempdir
def test_get_logger_sysloghandler_plumbing(self, tempdir):
orig_sysloghandler = utils.ThreadSafeSysLogHandler
syslog_handler_args = []
def syslog_handler_catcher(*args, **kwargs):
syslog_handler_args.append((args, kwargs))
return orig_sysloghandler(*args, **kwargs)
syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0
syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3
# Some versions of python perform host resolution while initializing
# the handler. See https://bugs.python.org/issue30378
orig_getaddrinfo = socket.getaddrinfo
def fake_getaddrinfo(host, *args):
return orig_getaddrinfo('localhost', *args)
with mock.patch.object(utils, 'ThreadSafeSysLogHandler',
syslog_handler_catcher), \
mock.patch.object(socket, 'getaddrinfo', fake_getaddrinfo):
# default log_address
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
}, 'server', log_route='server')
expected_args = [((), {'address': '/dev/log',
'facility': orig_sysloghandler.LOG_LOCAL3})]
if not os.path.exists('/dev/log') or \
os.path.isfile('/dev/log') or \
os.path.isdir('/dev/log'):
# Since socket on OSX is in /var/run/syslog, there will be
# a fallback to UDP.
expected_args = [
((), {'facility': orig_sysloghandler.LOG_LOCAL3})]
self.assertEqual(expected_args, syslog_handler_args)
# custom log_address - file doesn't exist: fallback to UDP
log_address = os.path.join(tempdir, 'foo')
syslog_handler_args = []
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': log_address,
}, 'server', log_route='server')
expected_args = [
((), {'facility': orig_sysloghandler.LOG_LOCAL3})]
self.assertEqual(
expected_args, syslog_handler_args)
# custom log_address - file exists, not a socket: fallback to UDP
with open(log_address, 'w'):
pass
syslog_handler_args = []
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': log_address,
}, 'server', log_route='server')
expected_args = [
((), {'facility': orig_sysloghandler.LOG_LOCAL3})]
self.assertEqual(
expected_args, syslog_handler_args)
# custom log_address - file exists, is a socket: use it
os.unlink(log_address)
with contextlib.closing(
socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)) as sock:
sock.settimeout(5)
sock.bind(log_address)
syslog_handler_args = []
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': log_address,
}, 'server', log_route='server')
expected_args = [
((), {'address': log_address,
'facility': orig_sysloghandler.LOG_LOCAL3})]
self.assertEqual(
expected_args, syslog_handler_args)
# Using UDP with default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com',
logging.handlers.SYSLOG_UDP_PORT),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
# Using UDP with non-default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
'log_udp_port': '2123',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com', 2123),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
with mock.patch.object(utils, 'ThreadSafeSysLogHandler',
side_effect=OSError(errno.EPERM, 'oops')):
with self.assertRaises(OSError) as cm:
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': 'log_address',
}, 'server', log_route='server')
self.assertEqual(errno.EPERM, cm.exception.errno)
@reset_logger_state
def test_clean_logger_exception(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
def log_exception(exc):
try:
raise exc
except (Exception, Timeout):
logger.exception('blah')
try:
# establish base case
self.assertEqual(strip_value(sio), '')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\n')
self.assertEqual(strip_value(sio), '')
logger.info('test')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\ntest\n')
self.assertEqual(strip_value(sio), '')
# test OSError
for en in (errno.EIO, errno.ENOSPC):
log_exception(OSError(en, 'my %s error message' % en))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertIn('my %s error message' % en, log_msg)
# unfiltered
log_exception(OSError())
self.assertTrue('Traceback' in strip_value(sio))
# test socket.error
log_exception(socket.error(errno.ECONNREFUSED,
'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
            self.assertNotIn('my error message', log_msg)
self.assertIn('Connection refused', log_msg)
log_exception(socket.error(errno.EHOSTUNREACH,
'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('my error message', log_msg)
self.assertIn('Host unreachable', log_msg)
log_exception(socket.error(errno.ETIMEDOUT, 'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('my error message', log_msg)
self.assertIn('Connection timeout', log_msg)
log_exception(socket.error(errno.ENETUNREACH, 'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('my error message', log_msg)
self.assertIn('Network unreachable', log_msg)
log_exception(socket.error(errno.EPIPE, 'my error message'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertNotIn('my error message', log_msg)
self.assertIn('Broken pipe', log_msg)
# unfiltered
log_exception(socket.error(0, 'my error message'))
log_msg = strip_value(sio)
self.assertIn('Traceback', log_msg)
self.assertIn('my error message', log_msg)
# test eventlet.Timeout
with ConnectionTimeout(42, 'my error message') \
as connection_timeout:
now = time.time()
connection_timeout.created_at = now - 123.456
with mock.patch('swift.common.utils.time.time',
return_value=now):
log_exception(connection_timeout)
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertTrue('ConnectionTimeout' in log_msg)
self.assertTrue('(42s after 123.46s)' in log_msg)
self.assertNotIn('my error message', log_msg)
with MessageTimeout(42, 'my error message') as message_timeout:
log_exception(message_timeout)
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertTrue('MessageTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertTrue('my error message' in log_msg)
# test BadStatusLine
log_exception(http_client.BadStatusLine(''))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertIn('''BadStatusLine("''"''', log_msg)
# green version is separate :-(
log_exception(green_http_client.BadStatusLine(''))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertIn('''BadStatusLine("''"''', log_msg)
if not six.PY2:
# py3 introduced RemoteDisconnected exceptions which inherit
# from both BadStatusLine *and* OSError; make sure those are
# handled as BadStatusLine, not OSError
log_exception(http_client.RemoteDisconnected(
'Remote end closed connection'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertIn(
"RemoteDisconnected('Remote end closed connection'",
log_msg)
log_exception(green_http_client.RemoteDisconnected(
'Remote end closed connection'))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertIn(
"RemoteDisconnected('Remote end closed connection'",
log_msg)
# test unhandled
log_exception(Exception('my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' in log_msg)
self.assertTrue('my error message' in log_msg)
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter_max_line_length(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
formatter = utils.SwiftLogFormatter(max_line_length=10)
handler.setFormatter(formatter)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
logger.info('12345')
self.assertEqual(strip_value(sio), '12345\n')
logger.info('1234567890')
self.assertEqual(strip_value(sio), '1234567890\n')
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12 ... de\n')
formatter.max_line_length = 11
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123 ... cde\n')
formatter.max_line_length = 0
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
formatter.max_line_length = 1
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1\n')
formatter.max_line_length = 2
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12\n')
formatter.max_line_length = 3
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123\n')
formatter.max_line_length = 4
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234\n')
formatter.max_line_length = 5
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12345\n')
formatter.max_line_length = 6
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123456\n')
formatter.max_line_length = 7
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1 ... e\n')
formatter.max_line_length = -10
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
handler.setFormatter(utils.SwiftLogFormatter())
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
self.assertFalse(logger.txn_id)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertIn('my error message', log_msg)
self.assertNotIn('txn', log_msg)
logger.txn_id = '12345'
logger.error('test')
log_msg = strip_value(sio)
self.assertIn('txn', log_msg)
self.assertIn('12345', log_msg)
# test txn in info message
self.assertEqual(logger.txn_id, '12345')
logger.info('test')
log_msg = strip_value(sio)
self.assertIn('txn', log_msg)
self.assertIn('12345', log_msg)
# test txn already in message
self.assertEqual(logger.txn_id, '12345')
logger.warning('test 12345 test')
self.assertEqual(strip_value(sio), 'test 12345 test\n')
# Test multi line collapsing
logger.error('my\nerror\nmessage')
log_msg = strip_value(sio)
self.assertIn('my#012error#012message', log_msg)
# test client_ip
self.assertFalse(logger.client_ip)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertIn('my error message', log_msg)
self.assertNotIn('client_ip', log_msg)
logger.client_ip = '1.2.3.4'
logger.error('test')
log_msg = strip_value(sio)
self.assertIn('client_ip', log_msg)
self.assertIn('1.2.3.4', log_msg)
# test no client_ip on info message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.info('test')
log_msg = strip_value(sio)
self.assertNotIn('client_ip', log_msg)
self.assertNotIn('1.2.3.4', log_msg)
# test client_ip (and txn) already in message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.warning('test 1.2.3.4 test 12345')
self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n')
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_prefixlogger(self):
# setup stream logging
sio = StringIO()
base_logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
base_logger.logger.addHandler(handler)
logger = utils.PrefixLoggerAdapter(base_logger, {})
logger.set_prefix('some prefix: ')
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
def log_exception(exc):
try:
raise exc
except (Exception, Timeout):
logger.exception('blah')
try:
# establish base case
self.assertEqual(strip_value(sio), '')
logger.info('test')
self.assertEqual(strip_value(sio), 'some prefix: test\n')
self.assertEqual(strip_value(sio), '')
logger.info('test')
logger.info('test')
self.assertEqual(
strip_value(sio),
'some prefix: test\nsome prefix: test\n')
self.assertEqual(strip_value(sio), '')
# test OSError
for en in (errno.EIO, errno.ENOSPC):
log_exception(OSError(en, 'my %s error message' % en))
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertEqual('some prefix: ', log_msg[:13])
self.assertIn('my %s error message' % en, log_msg)
# unfiltered
log_exception(OSError())
log_msg = strip_value(sio)
self.assertIn('Traceback', log_msg)
self.assertEqual('some prefix: ', log_msg[:13])
finally:
base_logger.logger.removeHandler(handler)
@reset_logger_state
def test_nested_prefixlogger(self):
# setup stream logging
sio = StringIO()
base_logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
base_logger.logger.addHandler(handler)
inner_logger = utils.PrefixLoggerAdapter(base_logger, {})
inner_logger.set_prefix('one: ')
outer_logger = utils.PrefixLoggerAdapter(inner_logger, {})
outer_logger.set_prefix('two: ')
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
# establish base case
self.assertEqual(strip_value(sio), '')
inner_logger.info('test')
self.assertEqual(strip_value(sio), 'one: test\n')
outer_logger.info('test')
self.assertEqual(strip_value(sio), 'one: two: test\n')
self.assertEqual(strip_value(sio), '')
finally:
base_logger.logger.removeHandler(handler)
def test_storage_directory(self):
self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'),
'objects/1/DEF/ABCDEF')
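    # select_ip_port returns the node's (ip, port), or the replication
    # pair when either the use_replication argument or the node dict's
    # own 'use_replication' flag is set.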
def test_select_node_ip(self):
dev = {
'ip': '127.0.0.1',
'port': 6200,
'replication_ip': '127.0.1.1',
'replication_port': 6400,
'device': 'sdb',
}
self.assertEqual(('127.0.0.1', 6200), utils.select_ip_port(dev))
self.assertEqual(('127.0.1.1', 6400),
utils.select_ip_port(dev, use_replication=True))
dev['use_replication'] = False
self.assertEqual(('127.0.1.1', 6400),
utils.select_ip_port(dev, use_replication=True))
dev['use_replication'] = True
self.assertEqual(('127.0.1.1', 6400), utils.select_ip_port(dev))
self.assertEqual(('127.0.1.1', 6400),
utils.select_ip_port(dev, use_replication=False))
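    # node_to_string renders a node as ip:port/device, bracketing IPv6
    # addresses; replication=True or a truthy 'use_replication' key in
    # the node dict selects the replication address instead.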
def test_node_to_string(self):
dev = {
'id': 3,
'region': 1,
'zone': 1,
'ip': '127.0.0.1',
'port': 6200,
'replication_ip': '127.0.1.1',
'replication_port': 6400,
'device': 'sdb',
'meta': '',
'weight': 8000.0,
'index': 0,
}
self.assertEqual(utils.node_to_string(dev), '127.0.0.1:6200/sdb')
self.assertEqual(utils.node_to_string(dev, replication=True),
'127.0.1.1:6400/sdb')
dev['use_replication'] = False
self.assertEqual(utils.node_to_string(dev), '127.0.0.1:6200/sdb')
self.assertEqual(utils.node_to_string(dev, replication=True),
'127.0.1.1:6400/sdb')
dev['use_replication'] = True
self.assertEqual(utils.node_to_string(dev), '127.0.1.1:6400/sdb')
# Node dict takes precedence
self.assertEqual(utils.node_to_string(dev, replication=False),
'127.0.1.1:6400/sdb')
dev = {
'id': 3,
'region': 1,
'zone': 1,
'ip': "fe80::0204:61ff:fe9d:f156",
'port': 6200,
'replication_ip': "fe80::0204:61ff:ff9d:1234",
'replication_port': 6400,
'device': 'sdb',
'meta': '',
'weight': 8000.0,
'index': 0,
}
self.assertEqual(utils.node_to_string(dev),
'[fe80::0204:61ff:fe9d:f156]:6200/sdb')
self.assertEqual(utils.node_to_string(dev, replication=True),
'[fe80::0204:61ff:ff9d:1234]:6400/sdb')
def test_hash_path(self):
        # Yes, these tests are deliberately very fragile. We want to make sure
        # that if someone changes the results hash_path produces, they know it
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''):
self.assertEqual(utils.hash_path('a'),
'1c84525acb02107ea475dcd3d09c2c58')
self.assertEqual(utils.hash_path('a', 'c'),
'33379ecb053aa5c9e356c68997cbb59e')
self.assertEqual(utils.hash_path('a', 'c', 'o'),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=True),
b'\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN'
b'\x00\xf4.\xb5\xea\x83')
self.assertRaises(ValueError, utils.hash_path, 'a', object='o')
utils.HASH_PATH_PREFIX = b'abcdef'
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'363f9b535bfb7d17a43a46a358afca0e')
def test_validate_hash_conf(self):
# no section causes InvalidHashPathConfigError
self._test_validate_hash_conf([], [], True)
# 'swift-hash' section is there but no options causes
# InvalidHashPathConfigError
self._test_validate_hash_conf(['swift-hash'], [], True)
# if we have the section and either of prefix or suffix,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_prefix'], False)
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_suffix'], False)
# definitely, we have the section and both of them,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], False)
# But invalid section name should make an error even if valid
# options are there
self._test_validate_hash_conf(
['swift-hash-xxx'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], True)
# Unreadable/missing swift.conf causes IOError
# We mock in case the unit tests are run on a laptop with SAIO,
# which does have a natural /etc/swift/swift.conf.
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''), \
mock.patch('swift.common.utils.HASH_PATH_SUFFIX', b''), \
mock.patch('swift.common.utils.SWIFT_CONF_FILE',
'/nosuchfile'), \
self.assertRaises(IOError):
utils.validate_hash_conf()
def _test_validate_hash_conf(self, sections, options, should_raise_error):
class FakeConfigParser(object):
def read_file(self, fp):
pass
readfp = read_file
def get(self, section, option):
if section not in sections:
raise NoSectionError('section error')
elif option not in options:
raise NoOptionError('option error', 'this option')
else:
return 'some_option_value'
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''), \
mock.patch('swift.common.utils.HASH_PATH_SUFFIX', b''), \
mock.patch('swift.common.utils.SWIFT_CONF_FILE',
'/dev/null'), \
mock.patch('swift.common.utils.ConfigParser',
FakeConfigParser):
try:
utils.validate_hash_conf()
except utils.InvalidHashPathConfigError:
if not should_raise_error:
self.fail('validate_hash_conf should not raise an error')
else:
if should_raise_error:
self.fail('validate_hash_conf should raise an error')
def test_load_libc_function(self):
self.assertTrue(callable(
utils.load_libc_function('printf')))
self.assertTrue(callable(
utils.load_libc_function('some_not_real_function')))
self.assertRaises(AttributeError,
utils.load_libc_function, 'some_not_real_function',
fail_if_missing=True)
def test_readconf(self):
conf = '''[section1]
foo = bar
[section2]
log_name = yarr'''
# setup a real file
fd, temppath = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': 'yarr'}}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1')
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar'}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile,
'section2').get('log_name')
expected = 'yarr'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
log_name='foo').get('log_name')
expected = 'foo'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
defaults={'bar': 'baz'})
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar', 'bar': 'baz'}
self.assertEqual(result, expected)
self.assertRaisesRegex(
ValueError, 'Unable to find section3 config section in.*',
utils.readconf, temppath, 'section3')
os.unlink(temppath)
self.assertRaises(IOError, utils.readconf, temppath)
def test_readconf_raw(self):
conf = '''[section1]
foo = bar
[section2]
log_name = %(yarr)s'''
# setup a real file
fd, temppath = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile, raw=True)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': '%(yarr)s'}}
self.assertEqual(result, expected)
os.unlink(temppath)
self.assertRaises(IOError, utils.readconf, temppath)
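    # readconf on a *.conf.d directory reads its files in sorted order;
    # [DEFAULT] values from later files override earlier ones and get
    # folded into every section, as the expected dict below shows.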
def test_readconf_dir(self):
config_dir = {
'server.conf.d/01.conf': """
[DEFAULT]
port = 8080
foo = bar
[section1]
name=section1
""",
'server.conf.d/section2.conf': """
[DEFAULT]
port = 8081
bar = baz
[section2]
name=section2
""",
'other-server.conf.d/01.conf': """
[DEFAULT]
port = 8082
[section3]
name=section3
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section1',
},
'section2': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section2',
},
}
self.assertEqual(conf, expected)
def test_readconf_dir_ignores_hidden_and_nondotconf_files(self):
config_dir = {
'server.conf.d/01.conf': """
[section1]
port = 8080
""",
'server.conf.d/.01.conf.swp': """
[section]
port = 8081
""",
'server.conf.d/01.conf-bak': """
[section]
port = 8082
""",
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8080',
},
}
self.assertEqual(conf, expected)
def test_drop_privileges(self):
required_func_calls = ('setgroups', 'setgid', 'setuid')
mock_os = MockOs(called_funcs=required_func_calls)
user = getuser()
user_data = pwd.getpwnam(user)
self.assertFalse(mock_os.called_funcs) # sanity check
# over-ride os with mock
with mock.patch('swift.common.utils.os', mock_os):
# exercise the code
utils.drop_privileges(user)
for func in required_func_calls:
self.assertIn(func, mock_os.called_funcs)
self.assertEqual(user_data[5], mock_os.environ['HOME'])
groups = {g.gr_gid for g in grp.getgrall() if user in g.gr_mem}
self.assertEqual(groups, set(mock_os.called_funcs['setgroups'][0]))
self.assertEqual(user_data[3], mock_os.called_funcs['setgid'][0])
self.assertEqual(user_data[2], mock_os.called_funcs['setuid'][0])
def test_drop_privileges_no_setgroups(self):
required_func_calls = ('geteuid', 'setgid', 'setuid')
mock_os = MockOs(called_funcs=required_func_calls)
user = getuser()
user_data = pwd.getpwnam(user)
self.assertFalse(mock_os.called_funcs) # sanity check
# over-ride os with mock
with mock.patch('swift.common.utils.os', mock_os):
# exercise the code
utils.drop_privileges(user)
for func in required_func_calls:
self.assertIn(func, mock_os.called_funcs)
self.assertNotIn('setgroups', mock_os.called_funcs)
self.assertEqual(user_data[5], mock_os.environ['HOME'])
self.assertEqual(user_data[3], mock_os.called_funcs['setgid'][0])
self.assertEqual(user_data[2], mock_os.called_funcs['setuid'][0])
    def test_clean_up_daemon_hygiene(self):
required_func_calls = ('chdir', 'umask')
# OSError if trying to get session leader, but setsid() OSError is
# ignored by the code under test.
bad_func_calls = ('setsid',)
mock_os = MockOs(called_funcs=required_func_calls,
raise_funcs=bad_func_calls)
with mock.patch('swift.common.utils.os', mock_os):
# exercise the code
utils.clean_up_daemon_hygiene()
for func in required_func_calls:
self.assertIn(func, mock_os.called_funcs)
for func in bad_func_calls:
self.assertIn(func, mock_os.called_funcs)
self.assertEqual('/', mock_os.called_funcs['chdir'][0])
self.assertEqual(0o22, mock_os.called_funcs['umask'][0])
@reset_logger_state
def test_capture_stdio(self):
# stubs
logger = utils.get_logger(None, 'dummy')
# mock utils system modules
_orig_sys = utils.sys
_orig_os = utils.os
try:
utils.sys = MockSys()
utils.os = MockOs()
# basic test
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds)
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test same args, but exc when trying to close stdio
utils.os = MockOs(raise_funcs=('dup2',))
utils.sys = MockSys()
# test unable to close stdio
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, [])
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test some other args
utils.os = MockOs()
utils.sys = MockSys()
logger = utils.get_logger(None, log_to_console=True)
# test console log
utils.capture_stdio(logger, capture_stdout=False,
capture_stderr=False)
self.assertTrue(utils.sys.excepthook is not None)
# when logging to console, stderr remains open
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds[:2])
reset_loggers()
# stdio not captured
self.assertFalse(isinstance(utils.sys.stdout,
utils.LoggerFileObject))
self.assertFalse(isinstance(utils.sys.stderr,
utils.LoggerFileObject))
finally:
utils.sys = _orig_sys
utils.os = _orig_os
@reset_logger_state
def test_get_logger_console(self):
logger = utils.get_logger(None)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertFalse(console_handlers)
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertTrue(console_handlers)
# make sure you can't have two console handlers
self.assertEqual(len(console_handlers), 1)
old_handler = console_handlers[0]
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertEqual(len(console_handlers), 1)
new_handler = console_handlers[0]
self.assertNotEqual(new_handler, old_handler)
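    # Harness for the ratelimit tests below: patch time.time, time.sleep
    # and eventlet.sleep with a fake clock so the tests run instantly
    # while still measuring how long the function would have slept.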
def verify_under_pseudo_time(
self, func, target_runtime_ms=1, *args, **kwargs):
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('time.sleep', my_sleep), \
patch('eventlet.sleep', my_sleep):
start = time.time()
func(*args, **kwargs)
            # make sure it's accurate to a tenth of a second: converting the
            # time difference to ms, 100 milliseconds is 1/10 of a second
diff_from_target_ms = abs(
target_runtime_ms - ((time.time() - start) * 1000))
self.assertTrue(diff_from_target_ms < 100,
"Expected %d < 100" % diff_from_target_ms)
def test_ratelimit_sleep(self):
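        # ratelimit_sleep is deprecated, hence the warning filter below; a
        # max_rate of zero or less should never sleep, while 50 calls at
        # 200/sec should take roughly 250ms of (virtual) time.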
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', r'ratelimit_sleep\(\) is deprecated')
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, -5)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, 0)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(50):
running_time = utils.ratelimit_sleep(running_time, 200)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=250)
def test_ratelimit_sleep_with_incr(self):
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', r'ratelimit_sleep\(\) is deprecated')
def testfunc():
running_time = 0
vals = [5, 17, 0, 3, 11, 30,
40, 4, 13, 2, -1] * 2 # adds up to 248
total = 0
for i in vals:
running_time = utils.ratelimit_sleep(running_time,
500, incr_by=i)
total += i
self.assertEqual(248, total)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=500)
def test_ratelimit_sleep_with_sleep(self):
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', r'ratelimit_sleep\(\) is deprecated')
def testfunc():
running_time = 0
sleeps = [0] * 7 + [.2] * 3 + [0] * 30
for i in sleeps:
running_time = utils.ratelimit_sleep(running_time, 40,
rate_buffer=1)
time.sleep(i)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=900)
def test_search_tree(self):
# file match & ext miss
with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t:
asdf = utils.search_tree(t, 'a*', '.conf')
self.assertEqual(len(asdf), 1)
self.assertEqual(asdf[0],
os.path.join(t, 'asdf.conf'))
# multi-file match & glob miss & sort
with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t:
app_bins = utils.search_tree(t, 'app*', 'bin')
self.assertEqual(len(app_bins), 2)
self.assertEqual(app_bins[0],
os.path.join(t, 'apple.bin'))
self.assertEqual(app_bins[1],
os.path.join(t, 'application.bin'))
# test file in folder & ext miss & glob miss
files = (
'sub/file1.ini',
'sub/file2.conf',
'sub.bin',
'bus.ini',
'bus/file3.ini',
)
with temptree(files) as t:
sub_ini = utils.search_tree(t, 'sub*', '.ini')
self.assertEqual(len(sub_ini), 1)
self.assertEqual(sub_ini[0],
os.path.join(t, 'sub/file1.ini'))
# test multi-file in folder & sub-folder & ext miss & glob miss
files = (
'folder_file.txt',
'folder/1.txt',
'folder/sub/2.txt',
'folder2/3.txt',
            'Folder3/4.txt',
            'folder.rc',
)
with temptree(files) as t:
folder_texts = utils.search_tree(t, 'folder*', '.txt')
self.assertEqual(len(folder_texts), 4)
f1 = os.path.join(t, 'folder_file.txt')
f2 = os.path.join(t, 'folder/1.txt')
f3 = os.path.join(t, 'folder/sub/2.txt')
f4 = os.path.join(t, 'folder2/3.txt')
for f in [f1, f2, f3, f4]:
self.assertTrue(f in folder_texts)
def test_search_tree_with_directory_ext_match(self):
files = (
'object-server/object-server.conf-base',
'object-server/1.conf.d/base.conf',
'object-server/1.conf.d/1.conf',
'object-server/2.conf.d/base.conf',
'object-server/2.conf.d/2.conf',
'object-server/3.conf.d/base.conf',
'object-server/3.conf.d/3.conf',
'object-server/4.conf.d/base.conf',
'object-server/4.conf.d/4.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'object-server', '.conf',
dir_ext='conf.d')
self.assertEqual(len(conf_dirs), 4)
for i in range(4):
conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1))
self.assertTrue(conf_dir in conf_dirs)
def test_search_tree_conf_dir_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.conf.d/base.conf',
'proxy-server/proxy-server.conf.d/pipeline.conf',
'proxy-server/proxy-noauth.conf.d/base.conf',
'proxy-server/proxy-noauth.conf.d/pipeline.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'proxy-server', 'noauth.conf',
dir_ext='noauth.conf.d')
self.assertEqual(len(conf_dirs), 1)
conf_dir = conf_dirs[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.conf.d')
self.assertEqual(conf_dir, expected)
def test_search_tree_conf_dir_pid_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.pid.d',
'proxy-server/proxy-noauth.pid.d',
)
with temptree(files) as t:
pid_files = utils.search_tree(t, 'proxy-server',
exts=['noauth.pid', 'noauth.pid.d'])
self.assertEqual(len(pid_files), 1)
pid_file = pid_files[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.pid.d')
self.assertEqual(pid_file, expected)
def test_write_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'test')
utils.write_file(file_name, 'test')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test')
# and also subdirs
file_name = os.path.join(t, 'subdir/test2')
utils.write_file(file_name, 'test2')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test2')
# but can't over-write files
file_name = os.path.join(t, 'subdir/test2/test3')
self.assertRaises(IOError, utils.write_file, file_name,
'test3')
def test_remove_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'blah.pid')
# assert no raise
self.assertEqual(os.path.exists(file_name), False)
self.assertIsNone(utils.remove_file(file_name))
with open(file_name, 'w') as f:
f.write('1')
self.assertTrue(os.path.exists(file_name))
self.assertIsNone(utils.remove_file(file_name))
self.assertFalse(os.path.exists(file_name))
def test_remove_directory(self):
with temptree([]) as t:
dir_name = os.path.join(t, 'subdir')
os.mkdir(dir_name)
self.assertTrue(os.path.isdir(dir_name))
self.assertIsNone(utils.remove_directory(dir_name))
self.assertFalse(os.path.exists(dir_name))
# assert no raise only if it does not exist, or is not empty
self.assertEqual(os.path.exists(dir_name), False)
self.assertIsNone(utils.remove_directory(dir_name))
_m_rmdir = mock.Mock(
side_effect=OSError(errno.ENOTEMPTY,
os.strerror(errno.ENOTEMPTY)))
with mock.patch('swift.common.utils.os.rmdir', _m_rmdir):
self.assertIsNone(utils.remove_directory(dir_name))
_m_rmdir = mock.Mock(
side_effect=OSError(errno.EPERM, os.strerror(errno.EPERM)))
with mock.patch('swift.common.utils.os.rmdir', _m_rmdir):
self.assertRaises(OSError, utils.remove_directory, dir_name)
@with_tempdir
def test_is_file_older(self, tempdir):
ts = utils.Timestamp(time.time() - 100000)
file_name = os.path.join(tempdir, '%s.data' % ts.internal)
# assert no raise
self.assertFalse(os.path.exists(file_name))
self.assertTrue(utils.is_file_older(file_name, 0))
self.assertFalse(utils.is_file_older(file_name, 1))
with open(file_name, 'w') as f:
f.write('1')
self.assertTrue(os.path.exists(file_name))
self.assertTrue(utils.is_file_older(file_name, 0))
# check that timestamp in file name is not relevant
self.assertFalse(utils.is_file_older(file_name, 50000))
time.sleep(0.01)
self.assertTrue(utils.is_file_older(file_name, 0.009))
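    # human_readable rounds to the nearest power of 1024 and appends the
    # matching binary suffix (Ki, Mi, Gi, ...), topping out at Yi.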
def test_human_readable(self):
self.assertEqual(utils.human_readable(0), '0')
self.assertEqual(utils.human_readable(1), '1')
self.assertEqual(utils.human_readable(10), '10')
self.assertEqual(utils.human_readable(100), '100')
self.assertEqual(utils.human_readable(999), '999')
self.assertEqual(utils.human_readable(1024), '1Ki')
self.assertEqual(utils.human_readable(1535), '1Ki')
self.assertEqual(utils.human_readable(1536), '2Ki')
self.assertEqual(utils.human_readable(1047552), '1023Ki')
self.assertEqual(utils.human_readable(1048063), '1023Ki')
self.assertEqual(utils.human_readable(1048064), '1Mi')
self.assertEqual(utils.human_readable(1048576), '1Mi')
self.assertEqual(utils.human_readable(1073741824), '1Gi')
self.assertEqual(utils.human_readable(1099511627776), '1Ti')
self.assertEqual(utils.human_readable(1125899906842624), '1Pi')
self.assertEqual(utils.human_readable(1152921504606846976), '1Ei')
self.assertEqual(utils.human_readable(1180591620717411303424), '1Zi')
self.assertEqual(utils.human_readable(1208925819614629174706176),
'1Yi')
self.assertEqual(utils.human_readable(1237940039285380274899124224),
'1024Yi')
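    # validate_sync_to returns (error, real_url, realm, realm_key); plain
    # http(s) URLs are checked against the allowed-hosts list, while
    # //realm/cluster forms are resolved via the container-sync realms
    # conf (and come back as all Nones when no realms conf is given).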
def test_validate_sync_to(self):
fname = 'container-sync-realms.conf'
fcontents = '''
[US]
key = 9ff3b71c849749dbaec4ccdd3cbab62b
cluster_dfw1 = http://dfw1.host/v1/
'''
with temptree([fname], [fcontents]) as tempdir:
logger = debug_logger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
for realms_conf in (None, csr):
for goodurl, result in (
('http://1.1.1.1/v1/a/c',
(None, 'http://1.1.1.1/v1/a/c', None, None)),
('http://1.1.1.1:8080/a/c',
(None, 'http://1.1.1.1:8080/a/c', None, None)),
('http://2.2.2.2/a/c',
(None, 'http://2.2.2.2/a/c', None, None)),
('https://1.1.1.1/v1/a/c',
(None, 'https://1.1.1.1/v1/a/c', None, None)),
('//US/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/dfw1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//',
(None, None, None, None)),
('',
(None, None, None, None))):
if goodurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
for badurl, result in (
('http://1.1.1.1',
('Path required in X-Container-Sync-To', None, None,
None)),
('httpq://1.1.1.1/v1/a/c',
('Invalid scheme \'httpq\' in X-Container-Sync-To, '
'must be "//", "http", or "https".', None, None,
None)),
('http://1.1.1.1/v1/a/c?query',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.2/v1/a/c',
("Invalid host '1.1.1.2' in X-Container-Sync-To",
None, None, None)),
('//us/invalid/a/c',
("No cluster endpoint for 'us' 'invalid'", None,
None, None)),
('//invalid/dfw1/a/c',
("No realm key for 'invalid'", None, None, None)),
('//us/invalid1/a/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a/'", None, None, None)),
('//us/invalid1/a',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a'", None, None, None)),
('//us/invalid1/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/'", None, None, None)),
('//us/invalid1',
("Invalid X-Container-Sync-To format "
"'//us/invalid1'", None, None, None)),
('//us/',
("Invalid X-Container-Sync-To format "
"'//us/'", None, None, None)),
('//us',
("Invalid X-Container-Sync-To format "
"'//us'", None, None, None))):
if badurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
def test_TRUE_VALUES(self):
for v in utils.TRUE_VALUES:
self.assertEqual(v, v.lower())
def test_config_true_value(self):
orig_trues = utils.TRUE_VALUES
try:
utils.TRUE_VALUES = 'hello world'.split()
for val in 'hello world HELLO WORLD'.split():
self.assertTrue(utils.config_true_value(val) is True)
self.assertTrue(utils.config_true_value(True) is True)
self.assertTrue(utils.config_true_value('foo') is False)
self.assertTrue(utils.config_true_value(False) is False)
self.assertTrue(utils.config_true_value(None) is False)
finally:
utils.TRUE_VALUES = orig_trues
def test_non_negative_float(self):
self.assertEqual(0, utils.non_negative_float('0.0'))
self.assertEqual(0, utils.non_negative_float(0.0))
self.assertEqual(1.1, utils.non_negative_float(1.1))
self.assertEqual(1.1, utils.non_negative_float('1.1'))
self.assertEqual(1.0, utils.non_negative_float('1'))
self.assertEqual(1, utils.non_negative_float(True))
self.assertEqual(0, utils.non_negative_float(False))
with self.assertRaises(ValueError) as cm:
utils.non_negative_float(-1.1)
self.assertEqual(
'Value must be a non-negative float number, not "-1.1".',
str(cm.exception))
with self.assertRaises(ValueError) as cm:
utils.non_negative_float('-1.1')
self.assertEqual(
'Value must be a non-negative float number, not "-1.1".',
str(cm.exception))
with self.assertRaises(ValueError) as cm:
utils.non_negative_float('one')
self.assertEqual(
'Value must be a non-negative float number, not "one".',
str(cm.exception))
with self.assertRaises(ValueError) as cm:
utils.non_negative_float(None)
self.assertEqual(
'Value must be a non-negative float number, not "None".',
str(cm.exception))
def test_non_negative_int(self):
self.assertEqual(0, utils.non_negative_int('0'))
self.assertEqual(0, utils.non_negative_int(0.0))
self.assertEqual(1, utils.non_negative_int(1))
self.assertEqual(1, utils.non_negative_int('1'))
self.assertEqual(1, utils.non_negative_int(True))
self.assertEqual(0, utils.non_negative_int(False))
with self.assertRaises(ValueError):
utils.non_negative_int(-1)
with self.assertRaises(ValueError):
utils.non_negative_int('-1')
with self.assertRaises(ValueError):
utils.non_negative_int('-1.1')
with self.assertRaises(ValueError):
utils.non_negative_int('1.1')
with self.assertRaises(ValueError):
utils.non_negative_int('1.0')
with self.assertRaises(ValueError):
utils.non_negative_int('one')
def test_config_positive_int_value(self):
expectations = {
# value : expected,
u'1': 1,
b'1': 1,
1: 1,
u'2': 2,
b'2': 2,
u'1024': 1024,
b'1024': 1024,
u'0': ValueError,
b'0': ValueError,
u'-1': ValueError,
b'-1': ValueError,
u'0x01': ValueError,
b'0x01': ValueError,
u'asdf': ValueError,
b'asdf': ValueError,
None: ValueError,
0: ValueError,
-1: ValueError,
            u'1.2': ValueError,  # a float string should be a ValueError
            b'1.2': ValueError,  # a float string should be a ValueError
}
for value, expected in expectations.items():
try:
rv = utils.config_positive_int_value(value)
except Exception as e:
if e.__class__ is not expected:
raise
else:
self.assertEqual(
'Config option must be an positive int number, '
'not "%s".' % value, e.args[0])
else:
self.assertEqual(expected, rv)
def test_config_float_value(self):
for args, expected in (
((99, None, None), 99.0),
((99.01, None, None), 99.01),
(('99', None, None), 99.0),
(('99.01', None, None), 99.01),
((99, 99, None), 99.0),
((99.01, 99.01, None), 99.01),
(('99', 99, None), 99.0),
(('99.01', 99.01, None), 99.01),
((99, None, 99), 99.0),
((99.01, None, 99.01), 99.01),
(('99', None, 99), 99.0),
(('99.01', None, 99.01), 99.01),
((-99, -99, -99), -99.0),
((-99.01, -99.01, -99.01), -99.01),
(('-99', -99, -99), -99.0),
(('-99.01', -99.01, -99.01), -99.01),):
actual = utils.config_float_value(*args)
self.assertEqual(expected, actual)
for val, minimum in ((99, 100),
('99', 100),
(-99, -98),
('-98.01', -98)):
with self.assertRaises(ValueError) as cm:
utils.config_float_value(val, minimum=minimum)
self.assertIn('greater than %s' % minimum, cm.exception.args[0])
self.assertNotIn('less than', cm.exception.args[0])
for val, maximum in ((99, 98),
('99', 98),
(-99, -100),
('-97.9', -98)):
with self.assertRaises(ValueError) as cm:
utils.config_float_value(val, maximum=maximum)
self.assertIn('less than %s' % maximum, cm.exception.args[0])
self.assertNotIn('greater than', cm.exception.args[0])
for val, minimum, maximum in ((99, 99, 98),
('99', 100, 100),
(99, 98, 98),):
with self.assertRaises(ValueError) as cm:
utils.config_float_value(val, minimum=minimum, maximum=maximum)
self.assertIn('greater than %s' % minimum, cm.exception.args[0])
self.assertIn('less than %s' % maximum, cm.exception.args[0])
def test_config_percent_value(self):
for arg, expected in (
(99, 0.99),
(25.5, 0.255),
('99', 0.99),
('25.5', 0.255),
(0, 0.0),
('0', 0.0),
('100', 1.0),
(100, 1.0),
(1, 0.01),
('1', 0.01),
(25, 0.25)):
actual = utils.config_percent_value(arg)
self.assertEqual(expected, actual)
# bad values
for val in (-1, '-1', 101, '101'):
with self.assertRaises(ValueError) as cm:
utils.config_percent_value(val)
self.assertIn('Config option must be a number, greater than 0, '
'less than 100, not "{}"'.format(val),
cm.exception.args[0])
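    # config_request_node_count_value accepts an absolute integer or an
    # 'N * replicas' expression and returns a callable mapping a replica
    # count to the number of nodes to request.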
def test_config_request_node_count_value(self):
def do_test(value, replicas, expected):
self.assertEqual(
expected,
utils.config_request_node_count_value(value)(replicas))
do_test('0', 10, 0)
do_test('1 * replicas', 3, 3)
do_test('1 * replicas', 11, 11)
do_test('2 * replicas', 3, 6)
do_test('2 * replicas', 11, 22)
do_test('11', 11, 11)
do_test('10', 11, 10)
do_test('12', 11, 12)
for bad in ('1.1', 1.1, 'auto', 'bad',
'2.5 * replicas', 'two * replicas'):
with annotate_failure(bad):
with self.assertRaises(ValueError):
utils.config_request_node_count_value(bad)
def test_config_auto_int_value(self):
expectations = {
# (value, default) : expected,
('1', 0): 1,
(1, 0): 1,
('asdf', 0): ValueError,
('auto', 1): 1,
('AutO', 1): 1,
('Aut0', 1): ValueError,
(None, 1): 1,
}
for (value, default), expected in expectations.items():
try:
rv = utils.config_auto_int_value(value, default)
except Exception as e:
if e.__class__ is not expected:
raise
else:
self.assertEqual(expected, rv)
def test_streq_const_time(self):
self.assertTrue(utils.streq_const_time('abc123', 'abc123'))
self.assertFalse(utils.streq_const_time('a', 'aaaaa'))
self.assertFalse(utils.streq_const_time('ABC123', 'abc123'))
def test_quorum_size(self):
expected_sizes = {1: 1,
2: 1,
3: 2,
4: 2,
5: 3}
got_sizes = dict([(n, utils.quorum_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_majority_size(self):
expected_sizes = {1: 1,
2: 2,
3: 2,
4: 3,
5: 3}
got_sizes = dict([(n, utils.majority_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_rsync_ip_ipv4_localhost(self):
self.assertEqual(utils.rsync_ip('127.0.0.1'), '127.0.0.1')
def test_rsync_ip_ipv6_random_ip(self):
self.assertEqual(
utils.rsync_ip('fe80:0000:0000:0000:0202:b3ff:fe1e:8329'),
'[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]')
def test_rsync_ip_ipv6_ipv4_compatible(self):
self.assertEqual(
utils.rsync_ip('::ffff:192.0.2.128'), '[::ffff:192.0.2.128]')
def test_rsync_module_interpolation(self):
fake_device = {'ip': '127.0.0.1', 'port': 11,
'replication_ip': '127.0.0.2', 'replication_port': 12,
'region': '1', 'zone': '2', 'device': 'sda1',
'meta': 'just_a_string'}
self.assertEqual(
utils.rsync_module_interpolation('{ip}', fake_device),
'127.0.0.1')
self.assertEqual(
utils.rsync_module_interpolation('{port}', fake_device),
'11')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}', fake_device),
'127.0.0.2')
self.assertEqual(
utils.rsync_module_interpolation('{replication_port}',
fake_device),
'12')
self.assertEqual(
utils.rsync_module_interpolation('{region}', fake_device),
'1')
self.assertEqual(
utils.rsync_module_interpolation('{zone}', fake_device),
'2')
self.assertEqual(
utils.rsync_module_interpolation('{device}', fake_device),
'sda1')
self.assertEqual(
utils.rsync_module_interpolation('{meta}', fake_device),
'just_a_string')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}::object',
fake_device),
'127.0.0.2::object')
self.assertEqual(
utils.rsync_module_interpolation('{ip}::container{port}',
fake_device),
'127.0.0.1::container11')
self.assertEqual(
utils.rsync_module_interpolation(
'{replication_ip}::object_{device}', fake_device),
'127.0.0.2::object_sda1')
self.assertEqual(
utils.rsync_module_interpolation(
'127.0.0.3::object_{replication_port}', fake_device),
'127.0.0.3::object_12')
self.assertRaises(ValueError, utils.rsync_module_interpolation,
'{replication_ip}::object_{deivce}', fake_device)
def test_generate_trans_id(self):
fake_time = 1366428370.5163341
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('')
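        # trans_id layout: 'tx' + 21 hex chars + '-' + 10 hex chars encoding
        # the time; an optional suffix is appended verbatim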
self.assertEqual(len(trans_id), 34)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:], 16), int(fake_time))
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('-suffix')
self.assertEqual(len(trans_id), 41)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[34:], '-suffix')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:34], 16), int(fake_time))
def test_get_trans_id_time(self):
ts = utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e')
self.assertIsNone(ts)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time(
'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time('')
self.assertIsNone(ts)
ts = utils.get_trans_id_time('garbage')
self.assertIsNone(ts)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright')
self.assertIsNone(ts)
def test_config_fallocate_value(self):
fallocate_value, is_percent = utils.config_fallocate_value('10%')
self.assertEqual(fallocate_value, 10)
self.assertTrue(is_percent)
fallocate_value, is_percent = utils.config_fallocate_value('10')
self.assertEqual(fallocate_value, 10)
self.assertFalse(is_percent)
try:
fallocate_value, is_percent = utils.config_fallocate_value('ab%')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: ab% is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('ab')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: ab is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('1%%')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: 1%% is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('10.0')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: 10.0 is an invalid value for '
'fallocate_reserve.')
fallocate_value, is_percent = utils.config_fallocate_value('10.5%')
self.assertEqual(fallocate_value, 10.5)
self.assertTrue(is_percent)
fallocate_value, is_percent = utils.config_fallocate_value('10.000%')
self.assertEqual(fallocate_value, 10.000)
self.assertTrue(is_percent)
def test_lock_file(self):
flags = os.O_CREAT | os.O_RDWR
with NamedTemporaryFile(delete=False) as nt:
nt.write(b"test string")
nt.flush()
nt.close()
with utils.lock_file(nt.name, unlink=False) as f:
self.assertEqual(f.read(), b"test string")
                # we have a lock, now let's try to take another one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, unlink=False, append=True) as f:
f.seek(0)
self.assertEqual(f.read(), b"test string")
f.seek(0)
f.write(b"\nanother string")
f.flush()
f.seek(0)
self.assertEqual(f.read(), b"test string\nanother string")
                # we have a lock, now let's try to take another one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, timeout=3, unlink=False) as f:
try:
with utils.lock_file(
nt.name, timeout=1, unlink=False) as f:
self.assertTrue(
False, "Expected LockTimeout exception")
except LockTimeout:
pass
with utils.lock_file(nt.name, unlink=True) as f:
self.assertEqual(f.read(), b"test string\nanother string")
                # we have a lock, now let's try to take another one
fd = os.open(nt.name, flags)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
self.assertRaises(OSError, os.remove, nt.name)
def test_lock_file_unlinked_after_open(self):
os_open = os.open
first_pass = [True]
def deleting_open(filename, flags):
# unlink the file after it's opened. once.
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', deleting_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
first_pass = [True]
def recreating_open(filename, flags):
# unlink and recreate the file after it's opened
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
os.close(os_open(filename, os.O_CREAT | os.O_RDWR))
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', recreating_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
def test_lock_file_held_on_unlink(self):
os_unlink = os.unlink
def flocking_unlink(filename):
# make sure the lock is held when we unlink
fd = os.open(filename, os.O_RDWR)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
os.close(fd)
os_unlink(filename)
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.unlink', flocking_unlink):
with utils.lock_file(nt.name, unlink=True):
pass
def test_lock_file_no_unlink_if_fail(self):
os_open = os.open
with NamedTemporaryFile(delete=True) as nt:
def lock_on_open(filename, flags):
# lock the file on another fd after it's opened.
fd = os_open(filename, flags)
fd2 = os_open(filename, flags)
fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB)
return fd
try:
timedout = False
with mock.patch('os.open', lock_on_open):
with utils.lock_file(nt.name, unlink=False, timeout=0.01):
pass
except LockTimeout:
timedout = True
self.assertTrue(timedout)
self.assertTrue(os.path.exists(nt.name))
def test_ismount_path_does_not_exist(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(os.path.join(tmpdir, 'bar')))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_not_mount(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_symlink(self):
tmpdir = mkdtemp()
try:
link = os.path.join(tmpdir, "tmp")
rdir = os.path.join(tmpdir, "realtmp")
os.mkdir(rdir)
os.symlink(rdir, link)
self.assertFalse(utils.ismount(link))
# Can add a stubfile to make it pass
with open(os.path.join(link, ".ismount"), "w"):
pass
self.assertTrue(utils.ismount(link))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_root(self):
self.assertTrue(utils.ismount('/'))
def test_ismount_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_dev(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
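        # fake the parent directory ('..') as living on a different device,
        # one of the conditions ismount() treats as a mount point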
def _mock_os_lstat(path):
if path.endswith(".."):
parent = _os_lstat(path)
return MockStat(parent.st_mode, parent.st_dev + 1,
parent.st_ino)
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_ino(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
return _os_lstat(path)
else:
parent_path = os.path.join(path, "..")
child = _os_lstat(path)
parent = _os_lstat(parent_path)
return MockStat(child.st_mode, parent.st_ino,
child.st_dev)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_stubfile(self):
tmpdir = mkdtemp()
fname = os.path.join(tmpdir, ".ismount")
try:
with open(fname, "w") as stubfile:
stubfile.write("")
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_parse_content_type(self):
self.assertEqual(utils.parse_content_type('text/plain'),
('text/plain', []))
self.assertEqual(utils.parse_content_type('text/plain;charset=utf-8'),
('text/plain', [('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain;hello="world";charset=utf-8'),
('text/plain', [('hello', '"world"'), ('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain; hello="world"; a=b'),
('text/plain', [('hello', '"world"'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a=b'),
('text/plain', [('x', r'"\""'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x; a=b'),
('text/plain', [('x', ''), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a'),
('text/plain', [('x', r'"\""'), ('a', '')]))
def test_override_bytes_from_content_type(self):
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=15'}
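        # a parseable swift_bytes param overrides 'bytes' and is stripped
        # from the content_type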
utils.override_bytes_from_content_type(listing_dict,
logger=debug_logger())
self.assertEqual(listing_dict['bytes'], 15)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=hey'}
utils.override_bytes_from_content_type(listing_dict,
logger=debug_logger())
self.assertEqual(listing_dict['bytes'], 1234)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
def test_extract_swift_bytes(self):
scenarios = {
# maps input value -> expected returned tuple
'': ('', None),
'text/plain': ('text/plain', None),
'text/plain; other=thing': ('text/plain;other=thing', None),
'text/plain; swift_bytes=123': ('text/plain', '123'),
'text/plain; other=thing;swift_bytes=123':
('text/plain;other=thing', '123'),
'text/plain; swift_bytes=123; other=thing':
('text/plain;other=thing', '123'),
'text/plain; swift_bytes=123; swift_bytes=456':
('text/plain', '456'),
'text/plain; swift_bytes=123; other=thing;swift_bytes=456':
('text/plain;other=thing', '456')}
for test_value, expected in scenarios.items():
self.assertEqual(expected, utils.extract_swift_bytes(test_value))
def test_clean_content_type(self):
subtests = {
'': '', 'text/plain': 'text/plain',
'text/plain; someother=thing': 'text/plain; someother=thing',
'text/plain; swift_bytes=123': 'text/plain',
'text/plain; someother=thing; swift_bytes=123':
'text/plain; someother=thing',
# Since Swift always tacks on the swift_bytes, clean_content_type()
# only strips swift_bytes if it's last. The next item simply shows
# that if for some other odd reason it's not last,
# clean_content_type() will not remove it from the header.
'text/plain; swift_bytes=123; someother=thing':
'text/plain; swift_bytes=123; someother=thing'}
for before, after in subtests.items():
self.assertEqual(utils.clean_content_type(before), after)
def test_get_valid_utf8_str(self):
def do_test(input_value, expected):
actual = utils.get_valid_utf8_str(input_value)
self.assertEqual(expected, actual)
self.assertIsInstance(actual, six.binary_type)
actual.decode('utf-8')
do_test(b'abc', b'abc')
do_test(u'abc', b'abc')
do_test(u'\uc77c\uc601', b'\xec\x9d\xbc\xec\x98\x81')
do_test(b'\xec\x9d\xbc\xec\x98\x81', b'\xec\x9d\xbc\xec\x98\x81')
# test some invalid UTF-8
do_test(b'\xec\x9d\xbc\xec\x98', b'\xec\x9d\xbc\xef\xbf\xbd')
# check surrogate pairs, too
        do_test(u'\U0001f0a1', b'\xf0\x9f\x82\xa1')
        do_test(u'\uD83C\uDCA1', b'\xf0\x9f\x82\xa1')
        do_test(b'\xf0\x9f\x82\xa1', b'\xf0\x9f\x82\xa1')
        do_test(b'\xed\xa0\xbc\xed\xb2\xa1', b'\xf0\x9f\x82\xa1')
def test_quote_bytes(self):
self.assertEqual(b'/v1/a/c3/subdirx/',
utils.quote(b'/v1/a/c3/subdirx/'))
self.assertEqual(b'/v1/a%26b/c3/subdirx/',
utils.quote(b'/v1/a&b/c3/subdirx/'))
self.assertEqual(b'%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F',
utils.quote(b'/v1/a&b/c3/subdirx/', safe='&'))
self.assertEqual(b'abc_%EC%9D%BC%EC%98%81',
utils.quote(u'abc_\uc77c\uc601'.encode('utf8')))
# Invalid utf8 is parsed as latin1, then re-encoded as utf8??
self.assertEqual(b'%EF%BF%BD%EF%BF%BD%EC%BC%9D%EF%BF%BD',
utils.quote(u'\uc77c\uc601'.encode('utf8')[::-1]))
def test_quote_unicode(self):
self.assertEqual(u'/v1/a/c3/subdirx/',
utils.quote(u'/v1/a/c3/subdirx/'))
self.assertEqual(u'/v1/a%26b/c3/subdirx/',
utils.quote(u'/v1/a&b/c3/subdirx/'))
self.assertEqual(u'%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F',
utils.quote(u'/v1/a&b/c3/subdirx/', safe='&'))
self.assertEqual(u'abc_%EC%9D%BC%EC%98%81',
utils.quote(u'abc_\uc77c\uc601'))
def test_parse_override_options(self):
# When override_<thing> is passed in, it takes precedence.
opts = utils.parse_override_options(
override_policies=[0, 1],
override_devices=['sda', 'sdb'],
override_partitions=[100, 200],
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [0, 1])
self.assertEqual(opts.devices, ['sda', 'sdb'])
self.assertEqual(opts.partitions, [100, 200])
# When override_<thing> is passed in, it applies even in run-once
# mode.
opts = utils.parse_override_options(
once=True,
override_policies=[0, 1],
override_devices=['sda', 'sdb'],
override_partitions=[100, 200],
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [0, 1])
self.assertEqual(opts.devices, ['sda', 'sdb'])
self.assertEqual(opts.partitions, [100, 200])
# In run-once mode, we honor the passed-in overrides.
opts = utils.parse_override_options(
once=True,
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [0, 1, 2, 3])
self.assertEqual(opts.devices, ['sda', 'sdb', 'sdc', 'sdd'])
self.assertEqual(opts.partitions, [100, 200, 300, 400])
# In run-forever mode, we ignore the passed-in overrides.
opts = utils.parse_override_options(
policies='0,1,2,3',
devices='sda,sdb,sdc,sdd',
partitions='100,200,300,400')
self.assertEqual(opts.policies, [])
self.assertEqual(opts.devices, [])
self.assertEqual(opts.partitions, [])
def test_get_policy_index(self):
# Account has no information about a policy
req = Request.blank(
'/sda1/p/a',
environ={'REQUEST_METHOD': 'GET'})
res = Response()
self.assertIsNone(utils.get_policy_index(req.headers,
res.headers))
# The policy of a container can be specified by the response header
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'})
res = Response(headers={'X-Backend-Storage-Policy-Index': '1'})
self.assertEqual('1', utils.get_policy_index(req.headers,
res.headers))
# The policy of an object to be created can be specified by the request
# header
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Backend-Storage-Policy-Index': '2'})
res = Response()
self.assertEqual('2', utils.get_policy_index(req.headers,
res.headers))
def test_log_string_formatter(self):
# Plain ASCII
lf = utils.LogStringFormatter()
self.assertEqual(lf.format('{a} {b}', a='Swift is', b='great'),
'Swift is great')
lf = utils.LogStringFormatter()
self.assertEqual(lf.format('{a} {b}', a='', b='great'),
' great')
lf = utils.LogStringFormatter(default='-')
self.assertEqual(lf.format('{a} {b}', a='', b='great'),
'- great')
lf = utils.LogStringFormatter(default='-', quote=True)
self.assertEqual(lf.format('{a} {b}', a='', b='great'),
'- great')
lf = utils.LogStringFormatter(quote=True)
self.assertEqual(lf.format('{a} {b}', a='Swift is', b='great'),
'Swift%20is great')
# Unicode & co
lf = utils.LogStringFormatter()
self.assertEqual(lf.format('{a} {b}', a='Swift est',
b=u'g\u00e9nial ^^'),
u'Swift est g\u00e9nial ^^')
lf = utils.LogStringFormatter(quote=True)
self.assertEqual(lf.format('{a} {b}', a='Swift est',
b=u'g\u00e9nial ^^'),
'Swift%20est g%C3%A9nial%20%5E%5E')
def test_str_anonymizer(self):
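        # StrAnonymizer compares equal to the original string, while its
        # .anonymized form is '{ALGO}<hexdigest>' ('{SALGO}...' when salted)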
anon = utils.StrAnonymizer('Swift is great!', 'md5', '')
self.assertEqual(anon, 'Swift is great!')
self.assertEqual(anon.anonymized,
'{MD5}45e6f00d48fdcf86213602a87df18772')
anon = utils.StrAnonymizer('Swift is great!', 'sha1', '')
self.assertEqual(anon, 'Swift is great!')
self.assertEqual(anon.anonymized,
'{SHA1}0010a3df215495d8bfa0ae4b66acc2afcc8f4c5c')
anon = utils.StrAnonymizer('Swift is great!', 'md5', 'salty_secret')
self.assertEqual(anon, 'Swift is great!')
self.assertEqual(anon.anonymized,
'{SMD5}ef4ce28fe3bdd10b6659458ceb1f3f0c')
anon = utils.StrAnonymizer('Swift is great!', 'sha1', 'salty_secret')
self.assertEqual(anon, 'Swift is great!')
self.assertEqual(anon.anonymized,
'{SSHA1}a4968f76acaddff0eb4069ebe8805d9cab44c9fe')
self.assertRaises(ValueError, utils.StrAnonymizer,
'Swift is great!', 'sha257', '')
def test_str_anonymizer_python_maddness(self):
with mock.patch('swift.common.utils.hashlib') as mocklib:
if six.PY2:
                # python < 2.7.9 doesn't have algorithms_guaranteed, but our
                # if block short-circuits before we explode
mocklib.algorithms = hashlib.algorithms
mocklib.algorithms_guaranteed.sideEffect = AttributeError()
else:
                # python 3 doesn't have algorithms, but our if block
                # short-circuits before we explode
mocklib.algorithms.sideEffect.sideEffect = AttributeError()
mocklib.algorithms_guaranteed = hashlib.algorithms_guaranteed
utils.StrAnonymizer('Swift is great!', 'sha1', '')
self.assertRaises(ValueError, utils.StrAnonymizer,
'Swift is great!', 'sha257', '')
def test_str_format_time(self):
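        # StrFormatTime exposes strftime-style directives as attributes
        # (dt.a -> '%a', dt.Y -> '%Y', ...) and s/ms/us/ns precision helpers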
dt = utils.StrFormatTime(10000.123456789)
self.assertEqual(str(dt), '10000.123456789')
self.assertEqual(dt.datetime, '01/Jan/1970/02/46/40')
self.assertEqual(dt.iso8601, '1970-01-01T02:46:40')
self.assertEqual(dt.asctime, 'Thu Jan 1 02:46:40 1970')
self.assertEqual(dt.s, '10000')
self.assertEqual(dt.ms, '123')
self.assertEqual(dt.us, '123456')
self.assertEqual(dt.ns, '123456789')
self.assertEqual(dt.a, 'Thu')
self.assertEqual(dt.A, 'Thursday')
self.assertEqual(dt.b, 'Jan')
self.assertEqual(dt.B, 'January')
self.assertEqual(dt.c, 'Thu Jan 1 02:46:40 1970')
self.assertEqual(dt.d, '01')
self.assertEqual(dt.H, '02')
self.assertEqual(dt.I, '02')
self.assertEqual(dt.j, '001')
self.assertEqual(dt.m, '01')
self.assertEqual(dt.M, '46')
self.assertEqual(dt.p, 'AM')
self.assertEqual(dt.S, '40')
self.assertEqual(dt.U, '00')
self.assertEqual(dt.w, '4')
self.assertEqual(dt.W, '00')
self.assertEqual(dt.x, '01/01/70')
self.assertEqual(dt.X, '02:46:40')
self.assertEqual(dt.y, '70')
self.assertEqual(dt.Y, '1970')
        self.assertIn(dt.Z, ('GMT', 'UTC'))  # it depends on Python 2 vs 3
self.assertRaises(ValueError, getattr, dt, 'z')
def test_get_log_line(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
res = Response()
trans_time = 1.2
additional_info = 'some information'
server_pid = 1234
exp_line = '1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD ' \
'/sda1/p/a/c/o" 200 - "-" "-" "-" 1.2000 "some information" 1234 -'
with mock.patch('time.time', mock.MagicMock(side_effect=[10001.0])):
with mock.patch(
'os.getpid', mock.MagicMock(return_value=server_pid)):
self.assertEqual(
exp_line,
utils.get_log_line(req, res, trans_time, additional_info,
utils.LOG_LINE_DEFAULT_FORMAT,
'md5', '54LT'))
def test_cache_from_env(self):
# should never get logging when swift.cache is found
env = {'swift.cache': 42}
logger = debug_logger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = debug_logger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, False))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = debug_logger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
# check allow_none controls logging when swift.cache is not found
err_msg = 'ERROR: swift.cache could not be found in env!'
env = {}
logger = debug_logger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = debug_logger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, False))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = debug_logger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
def test_fsync_dir(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp()
fd, temppath = tempfile.mkstemp(dir=tempdir)
_mock_fsync = mock.Mock()
_mock_close = mock.Mock()
with patch('swift.common.utils.fsync', _mock_fsync):
with patch('os.close', _mock_close):
utils.fsync_dir(tempdir)
self.assertTrue(_mock_fsync.called)
self.assertTrue(_mock_close.called)
self.assertTrue(isinstance(_mock_fsync.call_args[0][0], int))
self.assertEqual(_mock_fsync.call_args[0][0],
_mock_close.call_args[0][0])
# Not a directory - arg is file path
self.assertRaises(OSError, utils.fsync_dir, temppath)
logger = debug_logger()
def _mock_fsync(fd):
raise OSError(errno.EBADF, os.strerror(errno.EBADF))
with patch('swift.common.utils.fsync', _mock_fsync):
with mock.patch('swift.common.utils.logging', logger):
utils.fsync_dir(tempdir)
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
finally:
if fd is not None:
os.close(fd)
os.unlink(temppath)
if tempdir:
os.rmdir(tempdir)
def test_renamer_with_fsync_dir(self):
tempdir = None
try:
tempdir = mkdtemp()
# Simulate part of object path already existing
part_dir = os.path.join(tempdir, 'objects/1234/')
os.makedirs(part_dir)
obj_dir = os.path.join(part_dir, 'aaa', 'a' * 32)
obj_path = os.path.join(obj_dir, '1425276031.12345.data')
# Object dir had to be created
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
            # fsync_dir on parents of all newly created dirs
self.assertEqual(_m_fsync_dir.call_count, 3)
# Object dir existed
_m_os_rename.reset_mock()
_m_fsync_dir.reset_mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
# fsync_dir only on the leaf dir
self.assertEqual(_m_fsync_dir.call_count, 1)
finally:
if tempdir:
shutil.rmtree(tempdir)
def test_renamer_when_fsync_is_false(self):
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
_m_makedirs_count = mock.Mock(return_value=2)
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
with patch('swift.common.utils.makedirs_count',
_m_makedirs_count):
utils.renamer("fake_path", "/a/b/c.data", fsync=False)
_m_makedirs_count.assert_called_once_with("/a/b")
_m_os_rename.assert_called_once_with('fake_path', "/a/b/c.data")
self.assertFalse(_m_fsync_dir.called)
def test_makedirs_count(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp()
os.makedirs(os.path.join(tempdir, 'a/b'))
# 4 new dirs created
dirpath = os.path.join(tempdir, 'a/b/1/2/3/4')
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 4)
# no new dirs created - dir already exists
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 0)
# path exists and is a file
fd, temppath = tempfile.mkstemp(dir=dirpath)
os.close(fd)
self.assertRaises(OSError, utils.makedirs_count, temppath)
finally:
if tempdir:
shutil.rmtree(tempdir)
def test_find_namespace(self):
ts = utils.Timestamp.now().internal
start = utils.ShardRange('a/-a', ts, '', 'a')
atof = utils.ShardRange('a/a-f', ts, 'a', 'f')
ftol = utils.ShardRange('a/f-l', ts, 'f', 'l')
ltor = utils.ShardRange('a/l-r', ts, 'l', 'r')
rtoz = utils.ShardRange('a/r-z', ts, 'r', 'z')
end = utils.ShardRange('a/z-', ts, 'z', '')
ranges = [start, atof, ftol, ltor, rtoz, end]
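        # shard range bounds are lower-exclusive and upper-inclusive;
        # together these six ranges cover the entire namespace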
found = utils.find_namespace('', ranges)
self.assertEqual(found, None)
found = utils.find_namespace(' ', ranges)
self.assertEqual(found, start)
found = utils.find_namespace(' ', ranges[1:])
self.assertEqual(found, None)
found = utils.find_namespace('b', ranges)
self.assertEqual(found, atof)
found = utils.find_namespace('f', ranges)
self.assertEqual(found, atof)
found = utils.find_namespace('f\x00', ranges)
self.assertEqual(found, ftol)
found = utils.find_namespace('x', ranges)
self.assertEqual(found, rtoz)
found = utils.find_namespace('r', ranges)
self.assertEqual(found, ltor)
found = utils.find_namespace('}', ranges)
self.assertEqual(found, end)
found = utils.find_namespace('}', ranges[:-1])
self.assertEqual(found, None)
        # remove l-r from the list of ranges and try to find a shard range
        # for an item in that range.
found = utils.find_namespace('p', ranges[:-3] + ranges[-2:])
self.assertEqual(found, None)
# add some sub-shards; a sub-shard's state is less than its parent
# while the parent is undeleted, so insert these ahead of the
# overlapping parent in the list of ranges
ftoh = utils.ShardRange('a/f-h', ts, 'f', 'h')
htok = utils.ShardRange('a/h-k', ts, 'h', 'k')
overlapping_ranges = ranges[:2] + [ftoh, htok] + ranges[2:]
found = utils.find_namespace('g', overlapping_ranges)
self.assertEqual(found, ftoh)
found = utils.find_namespace('h', overlapping_ranges)
self.assertEqual(found, ftoh)
found = utils.find_namespace('k', overlapping_ranges)
self.assertEqual(found, htok)
found = utils.find_namespace('l', overlapping_ranges)
self.assertEqual(found, ftol)
found = utils.find_namespace('m', overlapping_ranges)
self.assertEqual(found, ltor)
ktol = utils.ShardRange('a/k-l', ts, 'k', 'l')
overlapping_ranges = ranges[:2] + [ftoh, htok, ktol] + ranges[2:]
found = utils.find_namespace('l', overlapping_ranges)
self.assertEqual(found, ktol)
def test_parse_db_filename(self):
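        # parse_db_filename splits '<hash>[_<epoch>].db' into
        # (hash, epoch or None, extension)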
actual = utils.parse_db_filename('hash.db')
self.assertEqual(('hash', None, '.db'), actual)
actual = utils.parse_db_filename('hash_1234567890.12345.db')
self.assertEqual(('hash', '1234567890.12345', '.db'), actual)
actual = utils.parse_db_filename(
'/dev/containers/part/ash/hash/hash_1234567890.12345.db')
self.assertEqual(('hash', '1234567890.12345', '.db'), actual)
self.assertRaises(ValueError, utils.parse_db_filename, '/path/to/dir/')
# These shouldn't come up in practice; included for completeness
self.assertEqual(utils.parse_db_filename('hashunder_.db'),
('hashunder', '', '.db'))
self.assertEqual(utils.parse_db_filename('lots_of_underscores.db'),
('lots', 'of', '.db'))
def test_make_db_file_path(self):
epoch = utils.Timestamp.now()
actual = utils.make_db_file_path('hash.db', epoch)
self.assertEqual('hash_%s.db' % epoch.internal, actual)
actual = utils.make_db_file_path('hash_oldepoch.db', epoch)
self.assertEqual('hash_%s.db' % epoch.internal, actual)
actual = utils.make_db_file_path('/path/to/hash.db', epoch)
self.assertEqual('/path/to/hash_%s.db' % epoch.internal, actual)
epoch = utils.Timestamp.now()
actual = utils.make_db_file_path(actual, epoch)
self.assertEqual('/path/to/hash_%s.db' % epoch.internal, actual)
# None strips epoch
self.assertEqual('hash.db', utils.make_db_file_path('hash.db', None))
self.assertEqual('/path/to/hash.db', utils.make_db_file_path(
'/path/to/hash_withepoch.db', None))
# epochs shouldn't have offsets
epoch = utils.Timestamp.now(offset=10)
actual = utils.make_db_file_path(actual, epoch)
self.assertEqual('/path/to/hash_%s.db' % epoch.normal, actual)
self.assertRaises(ValueError, utils.make_db_file_path,
'/path/to/hash.db', 'bad epoch')
@requires_o_tmpfile_support_in_tmp
def test_link_fd_to_path_linkat_success(self):
tempdir = mkdtemp()
fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY)
data = b"I'm whatever Gotham needs me to be"
_m_fsync_dir = mock.Mock()
try:
os.write(fd, data)
# fd is O_WRONLY
self.assertRaises(OSError, os.read, fd, 1)
file_path = os.path.join(tempdir, uuid4().hex)
with mock.patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.link_fd_to_path(fd, file_path, 1)
with open(file_path, 'rb') as f:
self.assertEqual(f.read(), data)
self.assertEqual(_m_fsync_dir.call_count, 2)
finally:
os.close(fd)
shutil.rmtree(tempdir)
@requires_o_tmpfile_support_in_tmp
def test_link_fd_to_path_target_exists(self):
tempdir = mkdtemp()
# Create and write to a file
fd, path = tempfile.mkstemp(dir=tempdir)
os.write(fd, b"hello world")
os.fsync(fd)
os.close(fd)
self.assertTrue(os.path.exists(path))
fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY)
try:
os.write(fd, b"bye world")
os.fsync(fd)
utils.link_fd_to_path(fd, path, 0, fsync=False)
            # Original file should now have been overwritten
with open(path, 'rb') as f:
self.assertEqual(f.read(), b"bye world")
finally:
os.close(fd)
shutil.rmtree(tempdir)
def test_link_fd_to_path_errno_not_EEXIST_or_ENOENT(self):
_m_linkat = mock.Mock(
side_effect=IOError(errno.EACCES, os.strerror(errno.EACCES)))
with mock.patch('swift.common.utils.linkat', _m_linkat):
try:
utils.link_fd_to_path(0, '/path', 1)
except IOError as err:
self.assertEqual(err.errno, errno.EACCES)
else:
self.fail("Expecting IOError exception")
self.assertTrue(_m_linkat.called)
@requires_o_tmpfile_support_in_tmp
def test_linkat_race_dir_not_exists(self):
tempdir = mkdtemp()
target_dir = os.path.join(tempdir, uuid4().hex)
target_path = os.path.join(target_dir, uuid4().hex)
os.mkdir(target_dir)
fd = os.open(target_dir, utils.O_TMPFILE | os.O_WRONLY)
# Simulating directory deletion by other backend process
os.rmdir(target_dir)
self.assertFalse(os.path.exists(target_dir))
try:
utils.link_fd_to_path(fd, target_path, 1)
self.assertTrue(os.path.exists(target_dir))
self.assertTrue(os.path.exists(target_path))
finally:
os.close(fd)
shutil.rmtree(tempdir)
def test_safe_json_loads(self):
expectations = {
None: None,
'': None,
0: None,
1: None,
'"asdf"': 'asdf',
'[]': [],
'{}': {},
"{'foo': 'bar'}": None,
'{"foo": "bar"}': {'foo': 'bar'},
}
failures = []
for value, expected in expectations.items():
try:
result = utils.safe_json_loads(value)
except Exception as e:
                # it's called safe; if it blows up, the test blows up
self.fail('%r caused safe method to throw %r!' % (
value, e))
try:
self.assertEqual(expected, result)
except AssertionError:
failures.append('%r => %r (expected %r)' % (
value, result, expected))
if failures:
self.fail('Invalid results from pure function:\n%s' %
'\n'.join(failures))
def test_strict_b64decode(self):
expectations = {
None: ValueError,
0: ValueError,
b'': b'',
u'': b'',
b'A': ValueError,
b'AA': ValueError,
b'AAA': ValueError,
b'AAAA': b'\x00\x00\x00',
u'AAAA': b'\x00\x00\x00',
b'////': b'\xff\xff\xff',
u'////': b'\xff\xff\xff',
b'A===': ValueError,
b'AA==': b'\x00',
b'AAA=': b'\x00\x00',
b' AAAA': ValueError,
b'AAAA ': ValueError,
b'AAAA============': b'\x00\x00\x00',
b'AA&AA==': ValueError,
b'====': b'',
}
failures = []
for value, expected in expectations.items():
try:
result = utils.strict_b64decode(value)
except Exception as e:
if inspect.isclass(expected) and issubclass(
expected, Exception):
if not isinstance(e, expected):
failures.append('%r raised %r (expected to raise %r)' %
(value, e, expected))
else:
failures.append('%r raised %r (expected to return %r)' %
(value, e, expected))
else:
if inspect.isclass(expected) and issubclass(
expected, Exception):
failures.append('%r => %r (expected to raise %r)' %
(value, result, expected))
elif result != expected:
failures.append('%r => %r (expected %r)' % (
value, result, expected))
if failures:
self.fail('Invalid results from pure function:\n%s' %
'\n'.join(failures))
def test_cap_length(self):
self.assertEqual(utils.cap_length(None, 3), None)
self.assertEqual(utils.cap_length('', 3), '')
self.assertEqual(utils.cap_length('asdf', 3), 'asd...')
self.assertEqual(utils.cap_length('asdf', 5), 'asdf')
self.assertEqual(utils.cap_length(b'asdf', 3), b'asd...')
self.assertEqual(utils.cap_length(b'asdf', 5), b'asdf')
def test_get_partition_for_hash(self):
hex_hash = 'af088baea4806dcaba30bf07d9e64c77'
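        # the partition is the top part_power bits of the hash:
        # 0xaf088bae >> (32 - 6) == 43, 0xaf088bae >> (32 - 7) == 87, etc.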
self.assertEqual(43, utils.get_partition_for_hash(hex_hash, 6))
self.assertEqual(87, utils.get_partition_for_hash(hex_hash, 7))
self.assertEqual(350, utils.get_partition_for_hash(hex_hash, 9))
self.assertEqual(700, utils.get_partition_for_hash(hex_hash, 10))
self.assertEqual(1400, utils.get_partition_for_hash(hex_hash, 11))
self.assertEqual(0, utils.get_partition_for_hash(hex_hash, 0))
self.assertEqual(0, utils.get_partition_for_hash(hex_hash, -1))
def test_get_partition_from_path(self):
def do_test(path):
self.assertEqual(utils.get_partition_from_path('/s/n', path), 70)
self.assertEqual(utils.get_partition_from_path('/s/n/', path), 70)
path += '/'
self.assertEqual(utils.get_partition_from_path('/s/n', path), 70)
self.assertEqual(utils.get_partition_from_path('/s/n/', path), 70)
do_test('/s/n/d/o/70/c77/af088baea4806dcaba30bf07d9e64c77/f')
# also works with a hashdir
do_test('/s/n/d/o/70/c77/af088baea4806dcaba30bf07d9e64c77')
# or suffix dir
do_test('/s/n/d/o/70/c77')
# or even the part dir itself
do_test('/s/n/d/o/70')
def test_replace_partition_in_path(self):
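        # bumping the part power by one maps partition P to either 2 * P or
        # 2 * P + 1, depending on the next bit of the object hash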
# Check for new part = part * 2
old = '/s/n/d/o/700/c77/af088baea4806dcaba30bf07d9e64c77/f'
new = '/s/n/d/o/1400/c77/af088baea4806dcaba30bf07d9e64c77/f'
# Expected outcome
self.assertEqual(utils.replace_partition_in_path('/s/n/', old, 11),
new)
# Make sure there is no change if the part power didn't change
self.assertEqual(utils.replace_partition_in_path('/s/n', old, 10), old)
self.assertEqual(utils.replace_partition_in_path('/s/n/', new, 11),
new)
# Check for new part = part * 2 + 1
old = '/s/n/d/o/693/c77/ad708baea4806dcaba30bf07d9e64c77/f'
new = '/s/n/d/o/1387/c77/ad708baea4806dcaba30bf07d9e64c77/f'
# Expected outcome
self.assertEqual(utils.replace_partition_in_path('/s/n', old, 11), new)
# Make sure there is no change if the part power didn't change
self.assertEqual(utils.replace_partition_in_path('/s/n', old, 10), old)
self.assertEqual(utils.replace_partition_in_path('/s/n/', new, 11),
new)
# check hash_dir
old = '/s/n/d/o/700/c77/af088baea4806dcaba30bf07d9e64c77'
exp = '/s/n/d/o/1400/c77/af088baea4806dcaba30bf07d9e64c77'
actual = utils.replace_partition_in_path('/s/n', old, 11)
self.assertEqual(exp, actual)
actual = utils.replace_partition_in_path('/s/n', exp, 11)
self.assertEqual(exp, actual)
# check longer devices path
old = '/s/n/1/2/d/o/700/c77/af088baea4806dcaba30bf07d9e64c77'
exp = '/s/n/1/2/d/o/1400/c77/af088baea4806dcaba30bf07d9e64c77'
actual = utils.replace_partition_in_path('/s/n/1/2', old, 11)
self.assertEqual(exp, actual)
actual = utils.replace_partition_in_path('/s/n/1/2', exp, 11)
self.assertEqual(exp, actual)
# check empty devices path
old = '/d/o/700/c77/af088baea4806dcaba30bf07d9e64c77'
exp = '/d/o/1400/c77/af088baea4806dcaba30bf07d9e64c77'
actual = utils.replace_partition_in_path('', old, 11)
self.assertEqual(exp, actual)
actual = utils.replace_partition_in_path('', exp, 11)
self.assertEqual(exp, actual)
# check path validation
path = '/s/n/d/o/693/c77/ad708baea4806dcaba30bf07d9e64c77/f'
with self.assertRaises(ValueError) as cm:
utils.replace_partition_in_path('/s/n1', path, 11)
self.assertEqual(
"Path '/s/n/d/o/693/c77/ad708baea4806dcaba30bf07d9e64c77/f' "
"is not under device dir '/s/n1'", str(cm.exception))
# check path validation - path lacks leading /
path = 's/n/d/o/693/c77/ad708baea4806dcaba30bf07d9e64c77/f'
with self.assertRaises(ValueError) as cm:
utils.replace_partition_in_path('/s/n', path, 11)
self.assertEqual(
"Path 's/n/d/o/693/c77/ad708baea4806dcaba30bf07d9e64c77/f' "
"is not under device dir '/s/n'", str(cm.exception))
def test_round_robin_iter(self):
it1 = iter([1, 2, 3])
it2 = iter([4, 5])
it3 = iter([6, 7, 8, 9])
it4 = iter([])
rr_its = utils.round_robin_iter([it1, it2, it3, it4])
got = list(rr_its)
# Expect that items get fetched in a round-robin fashion from the
# iterators
self.assertListEqual([1, 4, 6, 2, 5, 7, 3, 8, 9], got)
@with_tempdir
def test_get_db_files(self, tempdir):
dbdir = os.path.join(tempdir, 'dbdir')
self.assertEqual([], utils.get_db_files(dbdir))
path_1 = os.path.join(dbdir, 'dbfile.db')
self.assertEqual([], utils.get_db_files(path_1))
os.mkdir(dbdir)
self.assertEqual([], utils.get_db_files(path_1))
with open(path_1, 'wb'):
pass
self.assertEqual([path_1], utils.get_db_files(path_1))
path_2 = os.path.join(dbdir, 'dbfile_2.db')
self.assertEqual([path_1], utils.get_db_files(path_2))
with open(path_2, 'wb'):
pass
self.assertEqual([path_1, path_2], utils.get_db_files(path_1))
self.assertEqual([path_1, path_2], utils.get_db_files(path_2))
path_3 = os.path.join(dbdir, 'dbfile_3.db')
self.assertEqual([path_1, path_2], utils.get_db_files(path_3))
with open(path_3, 'wb'):
pass
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_1))
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_2))
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_3))
other_hash = os.path.join(dbdir, 'other.db')
self.assertEqual([], utils.get_db_files(other_hash))
other_hash = os.path.join(dbdir, 'other_1.db')
self.assertEqual([], utils.get_db_files(other_hash))
pending = os.path.join(dbdir, 'dbfile.pending')
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(pending))
with open(pending, 'wb'):
pass
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(pending))
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_1))
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_2))
self.assertEqual([path_1, path_2, path_3], utils.get_db_files(path_3))
self.assertEqual([], utils.get_db_files(dbdir))
os.unlink(path_1)
self.assertEqual([path_2, path_3], utils.get_db_files(path_1))
self.assertEqual([path_2, path_3], utils.get_db_files(path_2))
self.assertEqual([path_2, path_3], utils.get_db_files(path_3))
os.unlink(path_2)
self.assertEqual([path_3], utils.get_db_files(path_1))
self.assertEqual([path_3], utils.get_db_files(path_2))
self.assertEqual([path_3], utils.get_db_files(path_3))
os.unlink(path_3)
self.assertEqual([], utils.get_db_files(path_1))
self.assertEqual([], utils.get_db_files(path_2))
self.assertEqual([], utils.get_db_files(path_3))
self.assertEqual([], utils.get_db_files('/path/to/nowhere'))
def test_get_redirect_data(self):
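        # get_redirect_data returns (container_path, timestamp) parsed from
        # the Location and X-Backend-Redirect-Timestamp headers, or None
        # when there is no Location header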
ts_now = utils.Timestamp.now()
headers = {'X-Backend-Redirect-Timestamp': ts_now.internal}
response = FakeResponse(200, headers, b'')
self.assertIsNone(utils.get_redirect_data(response))
headers = {'Location': '/a/c/o',
'X-Backend-Redirect-Timestamp': ts_now.internal}
response = FakeResponse(200, headers, b'')
path, ts = utils.get_redirect_data(response)
self.assertEqual('a/c', path)
self.assertEqual(ts_now, ts)
headers = {'Location': '/a/c',
'X-Backend-Redirect-Timestamp': ts_now.internal}
response = FakeResponse(200, headers, b'')
path, ts = utils.get_redirect_data(response)
self.assertEqual('a/c', path)
self.assertEqual(ts_now, ts)
def do_test(headers):
response = FakeResponse(200, headers, b'')
with self.assertRaises(ValueError) as cm:
utils.get_redirect_data(response)
return cm.exception
exc = do_test({'Location': '/a',
'X-Backend-Redirect-Timestamp': ts_now.internal})
self.assertIn('Invalid path', str(exc))
exc = do_test({'Location': '',
'X-Backend-Redirect-Timestamp': ts_now.internal})
self.assertIn('Invalid path', str(exc))
exc = do_test({'Location': '/a/c',
'X-Backend-Redirect-Timestamp': 'bad'})
self.assertIn('Invalid timestamp', str(exc))
exc = do_test({'Location': '/a/c'})
self.assertIn('Invalid timestamp', str(exc))
exc = do_test({'Location': '/a/c',
'X-Backend-Redirect-Timestamp': '-1'})
self.assertIn('Invalid timestamp', str(exc))
@unittest.skipIf(sys.version_info >= (3, 8),
'pkg_resources loading is only available on python 3.7 '
'and earlier')
@mock.patch('pkg_resources.load_entry_point')
def test_load_pkg_resource(self, mock_driver):
tests = {
('swift.diskfile', 'egg:swift#replication.fs'):
('swift', 'swift.diskfile', 'replication.fs'),
('swift.diskfile', 'egg:swift#erasure_coding.fs'):
('swift', 'swift.diskfile', 'erasure_coding.fs'),
('swift.section', 'egg:swift#thing.other'):
('swift', 'swift.section', 'thing.other'),
('swift.section', 'swift#thing.other'):
('swift', 'swift.section', 'thing.other'),
('swift.section', 'thing.other'):
('swift', 'swift.section', 'thing.other'),
}
for args, expected in tests.items():
utils.load_pkg_resource(*args)
mock_driver.assert_called_with(*expected)
with self.assertRaises(TypeError) as cm:
args = ('swift.diskfile', 'nog:swift#replication.fs')
utils.load_pkg_resource(*args)
self.assertEqual("Unhandled URI scheme: 'nog'", str(cm.exception))
@unittest.skipIf(sys.version_info < (3, 8),
'importlib loading is only available on python 3.8 '
'and later')
@mock.patch('importlib.metadata.distribution')
def test_load_pkg_resource_importlib(self, mock_driver):
import importlib.metadata
class TestEntryPoint(importlib.metadata.EntryPoint):
def load(self):
return self.value
repl_obj = object()
ec_obj = object()
other_obj = object()
mock_driver.return_value.entry_points = [
TestEntryPoint(group='swift.diskfile',
name='replication.fs',
value=repl_obj),
TestEntryPoint(group='swift.diskfile',
name='erasure_coding.fs',
value=ec_obj),
TestEntryPoint(group='swift.section',
name='thing.other',
value=other_obj),
]
tests = {
('swift.diskfile', 'egg:swift#replication.fs'): repl_obj,
('swift.diskfile', 'egg:swift#erasure_coding.fs'): ec_obj,
('swift.section', 'egg:swift#thing.other'): other_obj,
('swift.section', 'swift#thing.other'): other_obj,
('swift.section', 'thing.other'): other_obj,
}
for args, expected in tests.items():
self.assertIs(expected, utils.load_pkg_resource(*args))
self.assertEqual(mock_driver.mock_calls, [mock.call('swift')])
mock_driver.reset_mock()
with self.assertRaises(TypeError) as cm:
args = ('swift.diskfile', 'nog:swift#replication.fs')
utils.load_pkg_resource(*args)
self.assertEqual("Unhandled URI scheme: 'nog'", str(cm.exception))
with self.assertRaises(ImportError) as cm:
args = ('swift.diskfile', 'other.fs')
utils.load_pkg_resource(*args)
self.assertEqual(
"Entry point ('swift.diskfile', 'other.fs') not found",
str(cm.exception))
with self.assertRaises(ImportError) as cm:
args = ('swift.missing', 'thing.other')
utils.load_pkg_resource(*args)
self.assertEqual(
"Entry point ('swift.missing', 'thing.other') not found",
str(cm.exception))
@with_tempdir
def test_systemd_notify(self, tempdir):
m_sock = mock.Mock(connect=mock.Mock(), sendall=mock.Mock())
with mock.patch('swift.common.utils.socket.socket',
return_value=m_sock) as m_socket:
# No notification socket
m_socket.reset_mock()
m_sock.reset_mock()
utils.systemd_notify()
self.assertEqual(m_socket.call_count, 0)
self.assertEqual(m_sock.connect.call_count, 0)
self.assertEqual(m_sock.sendall.call_count, 0)
# File notification socket
m_socket.reset_mock()
m_sock.reset_mock()
os.environ['NOTIFY_SOCKET'] = 'foobar'
utils.systemd_notify()
m_socket.assert_called_once_with(socket.AF_UNIX, socket.SOCK_DGRAM)
m_sock.connect.assert_called_once_with('foobar')
m_sock.sendall.assert_called_once_with(b'READY=1')
self.assertNotIn('NOTIFY_SOCKET', os.environ)
# Abstract notification socket
m_socket.reset_mock()
m_sock.reset_mock()
os.environ['NOTIFY_SOCKET'] = '@foobar'
utils.systemd_notify()
m_socket.assert_called_once_with(socket.AF_UNIX, socket.SOCK_DGRAM)
m_sock.connect.assert_called_once_with('\0foobar')
m_sock.sendall.assert_called_once_with(b'READY=1')
self.assertNotIn('NOTIFY_SOCKET', os.environ)
# Test logger with connection error
m_sock = mock.Mock(connect=mock.Mock(side_effect=EnvironmentError),
sendall=mock.Mock())
m_logger = mock.Mock(debug=mock.Mock())
with mock.patch('swift.common.utils.socket.socket',
return_value=m_sock) as m_socket:
os.environ['NOTIFY_SOCKET'] = '@foobar'
m_sock.reset_mock()
m_logger.reset_mock()
utils.systemd_notify()
self.assertEqual(0, m_sock.sendall.call_count)
self.assertEqual(0, m_logger.debug.call_count)
m_sock.reset_mock()
m_logger.reset_mock()
utils.systemd_notify(logger=m_logger)
self.assertEqual(0, m_sock.sendall.call_count)
m_logger.debug.assert_called_once_with(
"Systemd notification failed", exc_info=True)
# Test it for real
def do_test_real_socket(socket_address, notify_socket):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.settimeout(5)
sock.bind(socket_address)
os.environ['NOTIFY_SOCKET'] = notify_socket
utils.systemd_notify()
msg = sock.recv(512)
sock.close()
self.assertEqual(msg, b'READY=1')
self.assertNotIn('NOTIFY_SOCKET', os.environ)
# test file socket address
socket_path = os.path.join(tempdir, 'foobar')
do_test_real_socket(socket_path, socket_path)
if sys.platform.startswith('linux'):
# test abstract socket address
do_test_real_socket('\0foobar', '@foobar')
def test_md5_with_data(self):
if not self.fips_enabled:
digest = md5(self.md5_test_data).hexdigest()
self.assertEqual(digest, self.md5_digest)
else:
# on a FIPS enabled system, this throws a ValueError:
# [digital envelope routines: EVP_DigestInit_ex] disabled for FIPS
self.assertRaises(ValueError, md5, self.md5_test_data)
if not self.fips_enabled:
digest = md5(self.md5_test_data, usedforsecurity=True).hexdigest()
self.assertEqual(digest, self.md5_digest)
else:
self.assertRaises(
ValueError, md5, self.md5_test_data, usedforsecurity=True)
digest = md5(self.md5_test_data, usedforsecurity=False).hexdigest()
self.assertEqual(digest, self.md5_digest)
def test_md5_without_data(self):
if not self.fips_enabled:
test_md5 = md5()
test_md5.update(self.md5_test_data)
digest = test_md5.hexdigest()
self.assertEqual(digest, self.md5_digest)
else:
self.assertRaises(ValueError, md5)
if not self.fips_enabled:
test_md5 = md5(usedforsecurity=True)
test_md5.update(self.md5_test_data)
digest = test_md5.hexdigest()
self.assertEqual(digest, self.md5_digest)
else:
self.assertRaises(ValueError, md5, usedforsecurity=True)
test_md5 = md5(usedforsecurity=False)
test_md5.update(self.md5_test_data)
digest = test_md5.hexdigest()
self.assertEqual(digest, self.md5_digest)
@unittest.skipIf(sys.version_info.major == 2,
"hashlib.md5 does not raise TypeError here in py2")
def test_string_data_raises_type_error(self):
if not self.fips_enabled:
self.assertRaises(TypeError, hashlib.md5, u'foo')
self.assertRaises(TypeError, md5, u'foo')
self.assertRaises(
TypeError, md5, u'foo', usedforsecurity=True)
else:
self.assertRaises(ValueError, hashlib.md5, u'foo')
self.assertRaises(ValueError, md5, u'foo')
self.assertRaises(
ValueError, md5, u'foo', usedforsecurity=True)
self.assertRaises(
TypeError, md5, u'foo', usedforsecurity=False)
def test_none_data_raises_type_error(self):
if not self.fips_enabled:
self.assertRaises(TypeError, hashlib.md5, None)
self.assertRaises(TypeError, md5, None)
self.assertRaises(
TypeError, md5, None, usedforsecurity=True)
else:
self.assertRaises(ValueError, hashlib.md5, None)
self.assertRaises(ValueError, md5, None)
self.assertRaises(
ValueError, md5, None, usedforsecurity=True)
self.assertRaises(
TypeError, md5, None, usedforsecurity=False)
class ResellerConfReader(unittest.TestCase):
def setUp(self):
self.default_rules = {'operator_roles': ['admin', 'swiftoperator'],
'service_roles': [],
'require_group': ''}
def test_defaults(self):
conf = {}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_same_as_default(self):
conf = {'reseller_prefix': 'AUTH',
'operator_roles': 'admin, swiftoperator'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_single_blank_reseller(self):
conf = {'reseller_prefix': ''}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_single_blank_reseller_with_conf(self):
conf = {'reseller_prefix': '',
"''operator_roles": 'role1, role2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''].get('operator_roles'),
['role1', 'role2'])
self.assertEqual(options[''].get('service_roles'),
self.default_rules.get('service_roles'))
self.assertEqual(options[''].get('require_group'),
self.default_rules.get('require_group'))
def test_multiple_same_resellers(self):
conf = {'reseller_prefix': " '' , '' "}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
conf = {'reseller_prefix': '_, _'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['_'])
conf = {'reseller_prefix': 'AUTH, PRE2, AUTH, PRE2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
def test_several_resellers_with_conf(self):
conf = {'reseller_prefix': 'PRE1, PRE2',
'PRE1_operator_roles': 'role1, role2',
'PRE1_service_roles': 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['PRE1_', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['PRE1_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['PRE1_'].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['PRE1_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_first_blank(self):
conf = {'reseller_prefix': " '' , PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_with_blank_comma(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_stray_comma(self):
conf = {'reseller_prefix': "AUTH ,, PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_multiple_stray_commas_resellers(self):
conf = {'reseller_prefix': ' , , ,'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_unprefixed_options(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"operator_roles": 'role1, role2',
"service_roles": 'role3, role4',
'require_group': 'auth_blank_group',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['AUTH_'].get('service_roles')))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('auth_blank_group',
options['AUTH_'].get('require_group'))
self.assertEqual('auth_blank_group', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
class TestUnlinkOlder(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
self.mtime = {}
self.ts = make_timestamp_iter()
def tearDown(self):
rmtree(self.tempdir, ignore_errors=True)
def touch(self, fpath, mtime=None):
self.mtime[fpath] = mtime or next(self.ts)
open(fpath, 'w')
@contextlib.contextmanager
def high_resolution_getmtime(self):
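        # Serve the exact fake timestamps recorded by touch(); anything we
        # did not touch() falls back to the real filesystem mtime.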
orig_getmtime = os.path.getmtime
def mock_getmtime(fpath):
mtime = self.mtime.get(fpath)
if mtime is None:
mtime = orig_getmtime(fpath)
return mtime
with mock.patch('os.path.getmtime', mock_getmtime):
yield
def test_unlink_older_than_path_not_exists(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_older_than(path, next(self.ts))
def test_unlink_older_than_file(self):
path = os.path.join(self.tempdir, 'some-file')
self.touch(path)
with self.assertRaises(OSError) as ctx:
utils.unlink_older_than(path, next(self.ts))
self.assertEqual(ctx.exception.errno, errno.ENOTDIR)
def test_unlink_older_than_now(self):
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, next(self.ts))
self.assertEqual([], os.listdir(self.tempdir))
def test_unlink_not_old_enough(self):
start = next(self.ts)
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, start)
self.assertEqual(['test'], os.listdir(self.tempdir))
def test_unlink_mixed(self):
self.touch(os.path.join(self.tempdir, 'first'))
cutoff = next(self.ts)
self.touch(os.path.join(self.tempdir, 'second'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, cutoff)
self.assertEqual(['second'], os.listdir(self.tempdir))
def test_unlink_paths(self):
paths = []
for item in ('first', 'second', 'third'):
path = os.path.join(self.tempdir, item)
self.touch(path)
paths.append(path)
# don't unlink everyone
with self.high_resolution_getmtime():
utils.unlink_paths_older_than(paths[:2], next(self.ts))
self.assertEqual(['third'], os.listdir(self.tempdir))
def test_unlink_empty_paths(self):
# just make sure it doesn't blow up
utils.unlink_paths_older_than([], next(self.ts))
def test_unlink_not_exists_paths(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_paths_older_than([path], next(self.ts))
class TestFileLikeIter(unittest.TestCase):
def test_iter_file_iter(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
for chunk in utils.FileLikeIter(in_iter):
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_next(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
try:
chunk = next(iter_file)
except StopIteration:
break
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_read(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
iter_file = utils.FileLikeIter(in_iter)
self.assertEqual(iter_file.read(), b''.join(in_iter))
def test_read_with_size(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
chunk = iter_file.read(2)
if not chunk:
break
self.assertTrue(len(chunk) <= 2)
chunks.append(chunk)
self.assertEqual(b''.join(chunks), b''.join(in_iter))
def test_read_with_size_zero(self):
# makes little sense, but file supports it, so...
self.assertEqual(utils.FileLikeIter(b'abc').read(0), b'')
def test_readline(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline()
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readline2(self):
self.assertEqual(
utils.FileLikeIter([b'abc', b'def\n']).readline(4),
b'abcd')
def test_readline3(self):
self.assertEqual(
utils.FileLikeIter([b'a' * 1111, b'bc\ndef']).readline(),
(b'a' * 1111) + b'bc\n')
def test_readline_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline(2)
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[b'ab', b'c\n', b'd\n', b'ef', b'g\n', b'h\n', b'ij', b'\n', b'\n',
b'k\n', b'tr', b'ai', b'li', b'ng', b'.'])
def test_readlines(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = utils.FileLikeIter(in_iter).readlines()
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readlines_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
iter_file = utils.FileLikeIter(in_iter)
lists_of_lines = []
while True:
lines = iter_file.readlines(2)
if not lines:
break
lists_of_lines.append(lines)
self.assertEqual(
lists_of_lines,
[[b'ab'], [b'c\n'], [b'd\n'], [b'ef'], [b'g\n'], [b'h\n'], [b'ij'],
[b'\n', b'\n'], [b'k\n'], [b'tr'], [b'ai'], [b'li'], [b'ng'],
[b'.']])
def test_close(self):
iter_file = utils.FileLikeIter([b'a', b'b', b'c'])
self.assertEqual(next(iter_file), b'a')
iter_file.close()
self.assertTrue(iter_file.closed)
self.assertRaises(ValueError, iter_file.next)
self.assertRaises(ValueError, iter_file.read)
self.assertRaises(ValueError, iter_file.readline)
self.assertRaises(ValueError, iter_file.readlines)
# Just make sure repeated close calls don't raise an Exception
iter_file.close()
self.assertTrue(iter_file.closed)
def test_get_hub(self):
        # This test mocks the eventlet.green.select module without poll
# as in eventlet > 0.20
# https://github.com/eventlet/eventlet/commit/614a20462
# We add __original_module_select to sys.modules to mock usage
# of eventlet.patcher.original
class SelectWithPoll(object):
def poll():
pass
class SelectWithoutPoll(object):
pass
# Platform with poll() that call get_hub before eventlet patching
with mock.patch.dict('sys.modules',
{'select': SelectWithPoll,
'__original_module_select': SelectWithPoll}):
self.assertEqual(utils.get_hub(), 'poll')
# Platform with poll() that call get_hub after eventlet patching
with mock.patch.dict('sys.modules',
{'select': SelectWithoutPoll,
'__original_module_select': SelectWithPoll}):
self.assertEqual(utils.get_hub(), 'poll')
# Platform without poll() -- before or after patching doesn't matter
with mock.patch.dict('sys.modules',
{'select': SelectWithoutPoll,
'__original_module_select': SelectWithoutPoll}):
self.assertEqual(utils.get_hub(), 'selects')
class TestStatsdLogging(unittest.TestCase):
def setUp(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('localhost', port,
# socket.AF_INET) returned once
return [(socket.AF_INET, # address family
socket.SOCK_STREAM, # socket type
socket.IPPROTO_TCP, # socket protocol
'', # canonical name,
('127.0.0.1', port)), # socket address
(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('127.0.0.1', port))]
self.real_getaddrinfo = utils.socket.getaddrinfo
self.getaddrinfo_patcher = mock.patch.object(
utils.socket, 'getaddrinfo', fake_getaddrinfo)
self.mock_getaddrinfo = self.getaddrinfo_patcher.start()
self.addCleanup(self.getaddrinfo_patcher.stop)
def test_get_logger_statsd_client_not_specified(self):
logger = utils.get_logger({}, 'some-name', log_route='some-route')
# white-box construction validation
self.assertIsNone(logger.logger.statsd_client)
def test_get_logger_statsd_client_defaults(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'},
'some-name', log_route='some-route')
# white-box construction validation
self.assertTrue(isinstance(logger.logger.statsd_client,
utils.StatsdClient))
self.assertEqual(logger.logger.statsd_client._host, 'some.host.com')
self.assertEqual(logger.logger.statsd_client._port, 8125)
self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.')
self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1)
logger2 = utils.get_logger(
{'log_statsd_host': 'some.host.com'},
'other-name', log_route='some-route',
statsd_tail_prefix='some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'some-name.more-specific.')
self.assertEqual(logger2.logger.statsd_client._prefix,
'some-name.more-specific.')
# note: set_statsd_prefix is deprecated
logger2 = utils.get_logger({'log_statsd_host': 'some.host.com'},
'other-name', log_route='some-route')
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', r'set_statsd_prefix\(\) is deprecated')
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'some-name.more-specific.')
self.assertEqual(logger2.logger.statsd_client._prefix,
'some-name.more-specific.')
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', r'set_statsd_prefix\(\) is deprecated')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, '')
self.assertEqual(logger2.logger.statsd_client._prefix, '')
def test_get_logger_statsd_client_non_defaults(self):
conf = {
'log_statsd_host': 'another.host.com',
'log_statsd_port': '9876',
'log_statsd_default_sample_rate': '0.75',
'log_statsd_sample_rate_factor': '0.81',
'log_statsd_metric_prefix': 'tomato.sauce',
}
logger = utils.get_logger(conf, 'some-name', log_route='some-route')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.')
logger = utils.get_logger(conf, 'other-name', log_route='some-route',
statsd_tail_prefix='some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.more-specific.')
# note: set_statsd_prefix is deprecated
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', r'set_statsd_prefix\(\) is deprecated')
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.more-specific.')
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', r'set_statsd_prefix\(\) is deprecated')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.')
self.assertEqual(logger.logger.statsd_client._host, 'another.host.com')
self.assertEqual(logger.logger.statsd_client._port, 9876)
self.assertEqual(logger.logger.statsd_client._default_sample_rate,
0.75)
self.assertEqual(logger.logger.statsd_client._sample_rate_factor,
0.81)
def test_statsd_set_prefix_deprecation(self):
conf = {'log_statsd_host': 'another.host.com'}
with warnings.catch_warnings(record=True) as cm:
if six.PY2:
getattr(utils, '__warningregistry__', {}).clear()
warnings.resetwarnings()
warnings.simplefilter('always', DeprecationWarning)
logger = utils.get_logger(
conf, 'some-name', log_route='some-route')
logger.logger.statsd_client.set_prefix('some-name.more-specific')
msgs = [str(warning.message)
for warning in cm
if str(warning.message).startswith('set_prefix')]
self.assertEqual(
['set_prefix() is deprecated; use the ``tail_prefix`` argument of '
'the constructor when instantiating the class instead.'],
msgs)
with warnings.catch_warnings(record=True) as cm:
warnings.resetwarnings()
warnings.simplefilter('always', DeprecationWarning)
logger = utils.get_logger(
conf, 'some-name', log_route='some-route')
logger.set_statsd_prefix('some-name.more-specific')
msgs = [str(warning.message)
for warning in cm
if str(warning.message).startswith('set_statsd_prefix')]
self.assertEqual(
['set_statsd_prefix() is deprecated; use the '
'``statsd_tail_prefix`` argument to ``get_logger`` instead.'],
msgs)
def test_ipv4_or_ipv6_hostname_defaults_to_ipv4(self):
def stub_getaddrinfo_both_ipv4_and_ipv6(host, port, family, *rest):
if family == socket.AF_INET:
return [(socket.AF_INET, 'blah', 'blah', 'blah',
('127.0.0.1', int(port)))]
elif family == socket.AF_INET6:
# Implemented so an incorrectly ordered implementation (IPv6
# then IPv4) would realistically fail.
return [(socket.AF_INET6, 'blah', 'blah', 'blah',
('::1', int(port), 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo',
new=stub_getaddrinfo_both_ipv4_and_ipv6):
logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('localhost', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv4_instantiation_and_socket_creation(self):
logger = utils.get_logger({
'log_statsd_host': '127.0.0.1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('127.0.0.1', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv6_instantiation_and_socket_creation(self):
# We have to check the given hostname or IP for IPv4/IPv6 on logger
# instantiation so we don't call getaddrinfo() too often and don't have
# to call bind() on our socket to detect IPv4/IPv6 on every send.
#
# This test patches over the existing mock. If we just stop the
# existing mock, then unittest.exit() blows up, but stacking
# real-fake-fake works okay.
calls = []
def fake_getaddrinfo(host, port, family, *args):
calls.append(family)
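            # Fail the first (IPv4) lookup so the client falls back to IPv6.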
if len(calls) == 1:
raise socket.gaierror
# this is what a real getaddrinfo('::1', port,
# socket.AF_INET6) returned once
return [(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'', ('::1', port, 0, 0)),
(socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('::1', port, 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo', fake_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual([socket.AF_INET, socket.AF_INET6], calls)
self.assertEqual(statsd_client._sock_family, socket.AF_INET6)
self.assertEqual(statsd_client._target, ('::1', 9876, 0, 0))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET6)
def test_bad_hostname_instantiation(self):
with mock.patch.object(utils.socket, 'getaddrinfo',
side_effect=utils.socket.gaierror("whoops")):
logger = utils.get_logger({
'log_statsd_host': 'i-am-not-a-hostname-or-ip',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target,
('i-am-not-a-hostname-or-ip', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
# Maybe the DNS server gets fixed in a bit and it starts working... or
# maybe the DNS record hadn't propagated yet. In any case, failed
# statsd sends will warn in the logs until the DNS failure or invalid
# IP address in the configuration is fixed.
def test_sending_ipv6(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('::1', port,
# socket.AF_INET6) returned once
return [(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'', ('::1', port, 0, 0)),
(socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('::1', port, 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo', fake_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
fl = debug_logger()
statsd_client.logger = fl
mock_socket = MockUdpSocket()
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
self.assertEqual(fl.get_lines_for_level('warning'), [])
self.assertEqual(mock_socket.sent,
[(b'some-name.tunafish:1|c', ('::1', 9876, 0, 0))])
def test_no_exception_when_cant_send_udp_packet(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
statsd_client = logger.logger.statsd_client
fl = debug_logger()
statsd_client.logger = fl
mock_socket = MockUdpSocket(sendto_errno=errno.EPERM)
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
expected = ["Error sending UDP message to ('some.host.com', 8125): "
"[Errno 1] test errno 1"]
self.assertEqual(fl.get_lines_for_level('warning'), expected)
def test_sample_rates(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: 0.50001
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: 0.49999
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
self.assertTrue(payload.endswith(b"|@0.5"))
def test_sample_rates_with_sample_rate_factor(self):
logger = utils.get_logger({
'log_statsd_host': 'some.host.com',
'log_statsd_default_sample_rate': '0.82',
'log_statsd_sample_rate_factor': '0.91',
})
effective_sample_rate = 0.82 * 0.91
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: effective_sample_rate + 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
effective_sample_rate = 0.587 * 0.91
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles', sample_rate=0.587)
self.assertEqual(len(mock_socket.sent), 2)
payload = mock_socket.sent[1][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
def test_timing_stats(self):
class MockController(object):
def __init__(self, status):
self.status = status
self.logger = self
self.args = ()
self.called = 'UNKNOWN'
def timing_since(self, *args):
self.called = 'timing'
self.args = args
@utils.timing_stats()
def METHOD(controller):
return Response(status=controller.status)
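        # The 200-416 statuses below are recorded under METHOD.timing; the
        # 5xx statuses (500, 507) are recorded under METHOD.errors.timing.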
mock_controller = MockController(200)
METHOD(mock_controller)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(400)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(404)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(412)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(416)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(500)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(507)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
self.assertTrue(mock_controller.args[1] > 0)
def test_memcached_timing_stats(self):
class MockMemcached(object):
def __init__(self):
self.logger = self
self.args = ()
self.called = 'UNKNOWN'
def timing_since(self, *args):
self.called = 'timing'
self.args = args
@utils.memcached_timing_stats()
def set(cache):
pass
@utils.memcached_timing_stats()
def get(cache):
pass
mock_cache = MockMemcached()
with patch('time.time',) as mock_time:
mock_time.return_value = 1000.99
set(mock_cache)
self.assertEqual(mock_cache.called, 'timing')
self.assertEqual(len(mock_cache.args), 2)
self.assertEqual(mock_cache.args[0], 'memcached.set.timing')
self.assertEqual(mock_cache.args[1], 1000.99)
mock_time.return_value = 2000.99
get(mock_cache)
self.assertEqual(mock_cache.called, 'timing')
self.assertEqual(len(mock_cache.args), 2)
self.assertEqual(mock_cache.args[0], 'memcached.get.timing')
self.assertEqual(mock_cache.args[1], 2000.99)
class UnsafeXrange(object):
"""
    Like range(upper_bound), but with extra context switching to screw
    things up.
"""
def __init__(self, upper_bound):
self.current = 0
self.concurrent_calls = 0
self.upper_bound = upper_bound
self.concurrent_call = False
def __iter__(self):
return self
def next(self):
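        # Record whether next() was ever entered while another call was still
        # in progress; GreenthreadSafeIterator tests assert on this flag.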
if self.concurrent_calls > 0:
self.concurrent_call = True
self.concurrent_calls += 1
try:
if self.current >= self.upper_bound:
raise StopIteration
else:
val = self.current
self.current += 1
eventlet.sleep() # yield control
return val
finally:
self.concurrent_calls -= 1
__next__ = next
class TestAffinityKeyFunction(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_single_region(self):
keyfn = utils.affinity_key_function("r3=1")
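        # Region 3 nodes (ids 4 and 5) sort to the front; the rest keep their
        # original relative order.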
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7], ids)
def test_bogus_value(self):
self.assertRaises(ValueError,
utils.affinity_key_function, "r3")
self.assertRaises(ValueError,
utils.affinity_key_function, "r3=elephant")
def test_empty_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function("")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_all_whitespace_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function(" \n")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_with_zone_zero(self):
keyfn = utils.affinity_key_function("r4z0=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7], ids)
def test_multiple(self):
keyfn = utils.affinity_key_function("r1=100, r4=200, r3z1=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 0, 1, 6, 7, 2, 3, 5], ids)
def test_more_specific_after_less_specific(self):
keyfn = utils.affinity_key_function("r2=100, r2z2=50")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7], ids)
class TestAffinityLocalityPredicate(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_empty(self):
pred = utils.affinity_locality_predicate('')
self.assertTrue(pred is None)
def test_region(self):
pred = utils.affinity_locality_predicate('r1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1], ids)
def test_zone(self):
pred = utils.affinity_locality_predicate('r1z1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0], ids)
def test_multiple(self):
pred = utils.affinity_locality_predicate('r1, r3, r4z0')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1, 4, 5, 6], ids)
def test_invalid(self):
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'falafel')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r8zQ')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r2d2')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r1z1=1')
class TestEventletRateLimiter(unittest.TestCase):
def test_init(self):
rl = utils.EventletRateLimiter(0.1)
self.assertEqual(0.1, rl.max_rate)
self.assertEqual(0.0, rl.running_time)
self.assertEqual(5000, rl.rate_buffer_ms)
rl = utils.EventletRateLimiter(
0.2, rate_buffer=2, running_time=1234567.8)
self.assertEqual(0.2, rl.max_rate)
self.assertEqual(1234567.8, rl.running_time)
self.assertEqual(2000, rl.rate_buffer_ms)
def test_non_blocking(self):
rate_limiter = utils.EventletRateLimiter(0.1, rate_buffer=0)
with patch('time.time',) as mock_time:
with patch('eventlet.sleep') as mock_sleep:
mock_time.return_value = 0
self.assertTrue(rate_limiter.is_allowed())
mock_sleep.assert_not_called()
self.assertFalse(rate_limiter.is_allowed())
mock_sleep.assert_not_called()
mock_time.return_value = 9.99
self.assertFalse(rate_limiter.is_allowed())
mock_sleep.assert_not_called()
mock_time.return_value = 10.0
self.assertTrue(rate_limiter.is_allowed())
mock_sleep.assert_not_called()
self.assertFalse(rate_limiter.is_allowed())
mock_sleep.assert_not_called()
rate_limiter = utils.EventletRateLimiter(0.1, rate_buffer=20)
with patch('time.time',) as mock_time:
with patch('eventlet.sleep') as mock_sleep:
mock_time.return_value = 20.0
self.assertTrue(rate_limiter.is_allowed())
mock_sleep.assert_not_called()
self.assertTrue(rate_limiter.is_allowed())
mock_sleep.assert_not_called()
self.assertTrue(rate_limiter.is_allowed())
mock_sleep.assert_not_called()
self.assertFalse(rate_limiter.is_allowed())
mock_sleep.assert_not_called()
def _do_test(self, max_rate, running_time, start_time, rate_buffer,
burst_after_idle=False, incr_by=1.0):
rate_limiter = utils.EventletRateLimiter(
max_rate,
running_time=1000 * running_time, # msecs
rate_buffer=rate_buffer,
burst_after_idle=burst_after_idle)
grant_times = []
current_time = [start_time]
def mock_time():
return current_time[0]
def mock_sleep(duration):
current_time[0] += duration
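        # With eventlet.sleep patched to advance the simulated clock, wait()
        # runs instantly and the time of each grant can be captured.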
with patch('time.time', mock_time):
with patch('eventlet.sleep', mock_sleep):
for i in range(5):
rate_limiter.wait(incr_by=incr_by)
grant_times.append(current_time[0])
return [round(t, 6) for t in grant_times]
def test_ratelimit(self):
grant_times = self._do_test(1, 0, 1, 0)
self.assertEqual([1, 2, 3, 4, 5], grant_times)
grant_times = self._do_test(10, 0, 1, 0)
self.assertEqual([1, 1.1, 1.2, 1.3, 1.4], grant_times)
grant_times = self._do_test(.1, 0, 1, 0)
self.assertEqual([1, 11, 21, 31, 41], grant_times)
grant_times = self._do_test(.1, 11, 1, 0)
self.assertEqual([11, 21, 31, 41, 51], grant_times)
def test_incr_by(self):
grant_times = self._do_test(1, 0, 1, 0, incr_by=2.5)
self.assertEqual([1, 3.5, 6, 8.5, 11], grant_times)
def test_burst(self):
grant_times = self._do_test(1, 1, 4, 0)
self.assertEqual([4, 5, 6, 7, 8], grant_times)
grant_times = self._do_test(1, 1, 4, 1)
self.assertEqual([4, 5, 6, 7, 8], grant_times)
grant_times = self._do_test(1, 1, 4, 2)
self.assertEqual([4, 5, 6, 7, 8], grant_times)
grant_times = self._do_test(1, 1, 4, 3)
self.assertEqual([4, 4, 4, 4, 5], grant_times)
grant_times = self._do_test(1, 1, 4, 4)
self.assertEqual([4, 4, 4, 4, 5], grant_times)
grant_times = self._do_test(1, 1, 3, 3)
self.assertEqual([3, 3, 3, 4, 5], grant_times)
grant_times = self._do_test(1, 0, 2, 3)
self.assertEqual([2, 2, 2, 3, 4], grant_times)
grant_times = self._do_test(1, 1, 3, 3)
self.assertEqual([3, 3, 3, 4, 5], grant_times)
grant_times = self._do_test(1, 0, 3, 3)
self.assertEqual([3, 3, 3, 3, 4], grant_times)
grant_times = self._do_test(1, 1, 3, 3)
self.assertEqual([3, 3, 3, 4, 5], grant_times)
grant_times = self._do_test(1, 0, 4, 3)
self.assertEqual([4, 5, 6, 7, 8], grant_times)
def test_burst_after_idle(self):
grant_times = self._do_test(1, 1, 4, 1, burst_after_idle=True)
self.assertEqual([4, 4, 5, 6, 7], grant_times)
grant_times = self._do_test(1, 1, 4, 2, burst_after_idle=True)
self.assertEqual([4, 4, 4, 5, 6], grant_times)
grant_times = self._do_test(1, 0, 4, 3, burst_after_idle=True)
self.assertEqual([4, 4, 4, 4, 5], grant_times)
# running_time = start_time prevents burst on start-up
grant_times = self._do_test(1, 4, 4, 3, burst_after_idle=True)
self.assertEqual([4, 5, 6, 7, 8], grant_times)
class TestRateLimitedIterator(unittest.TestCase):
def run_under_pseudo_time(
self, func, *args, **kwargs):
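        # Run func against a simulated clock: each time() call ticks it
        # forward slightly and each eventlet.sleep() advances it by the
        # requested duration, so no real time passes.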
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('eventlet.sleep', my_sleep):
return func(*args, **kwargs)
def test_rate_limiting(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(range(9999), 100)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 11, not 10, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 11)
def test_rate_limiting_sometimes(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100,
ratelimit_if=lambda item: item % 23 != 0)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.5:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# we'd get 51 without the ratelimit_if, but because 0, 23 and 46
# weren't subject to ratelimiting, we get 54 instead
self.assertEqual(len(got), 54)
def test_limit_after(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100, limit_after=5)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 16, not 15, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 16)
class TestGreenthreadSafeIterator(unittest.TestCase):
def increment(self, iterable):
plus_ones = []
for n in iterable:
plus_ones.append(n + 1)
return plus_ones
def test_setup_works(self):
# it should work without concurrent access
self.assertEqual([0, 1, 2, 3], list(UnsafeXrange(4)))
iterable = UnsafeXrange(10)
pile = eventlet.GreenPile(2)
for _ in range(2):
pile.spawn(self.increment, iterable)
sorted([resp for resp in pile])
self.assertTrue(
iterable.concurrent_call, 'test setup is insufficiently crazy')
def test_access_is_serialized(self):
pile = eventlet.GreenPile(2)
unsafe_iterable = UnsafeXrange(10)
iterable = utils.GreenthreadSafeIterator(unsafe_iterable)
for _ in range(2):
pile.spawn(self.increment, iterable)
response = sorted(sum([resp for resp in pile], []))
self.assertEqual(list(range(1, 11)), response)
self.assertTrue(
not unsafe_iterable.concurrent_call, 'concurrent call occurred')
class TestStatsdLoggingDelegation(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('localhost', 0))
self.port = self.sock.getsockname()[1]
self.queue = Queue()
self.reader_thread = threading.Thread(target=self.statsd_reader)
self.reader_thread.daemon = True
self.reader_thread.start()
def tearDown(self):
# The "no-op when disabled" test doesn't set up a real logger, so
# create one here so we can tell the reader thread to stop.
if not getattr(self, 'logger', None):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.logger.increment('STOP')
self.reader_thread.join(timeout=4)
self.sock.close()
del self.logger
def statsd_reader(self):
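        # Background thread: push every received UDP payload onto the queue
        # until a packet containing b'STOP' arrives.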
while True:
try:
payload = self.sock.recv(4096)
if payload and b'STOP' in payload:
return 42
self.queue.put(payload)
except Exception as e:
sys.stderr.write('statsd_reader thread: %r' % (e,))
break
def _send_and_get(self, sender_fn, *args, **kwargs):
"""
Because the client library may not actually send a packet with
sample_rate < 1, we keep trying until we get one through.
"""
got = None
while not got:
sender_fn(*args, **kwargs)
try:
got = self.queue.get(timeout=0.5)
except Empty:
pass
return got
def assertStat(self, expected, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertEqual(expected, got)
def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertTrue(re.search(expected_regexp, got),
[got, expected_regexp])
def test_methods_are_no_ops_when_not_enabled(self):
logger = utils.get_logger({
# No "log_statsd_host" means "disabled"
'log_statsd_port': str(self.port),
}, 'some-name')
# Delegate methods are no-ops
self.assertIsNone(logger.update_stats('foo', 88))
self.assertIsNone(logger.update_stats('foo', 88, 0.57))
self.assertIsNone(logger.update_stats('foo', 88,
sample_rate=0.61))
self.assertIsNone(logger.increment('foo'))
self.assertIsNone(logger.increment('foo', 0.57))
self.assertIsNone(logger.increment('foo', sample_rate=0.61))
self.assertIsNone(logger.decrement('foo'))
self.assertIsNone(logger.decrement('foo', 0.57))
self.assertIsNone(logger.decrement('foo', sample_rate=0.61))
self.assertIsNone(logger.timing('foo', 88.048))
self.assertIsNone(logger.timing('foo', 88.57, 0.34))
self.assertIsNone(logger.timing('foo', 88.998, sample_rate=0.82))
self.assertIsNone(logger.timing_since('foo', 8938))
self.assertIsNone(logger.timing_since('foo', 8948, 0.57))
self.assertIsNone(logger.timing_since('foo', 849398,
sample_rate=0.61))
# Now, the queue should be empty (no UDP packets sent)
self.assertRaises(Empty, self.queue.get_nowait)
def test_delegate_methods_with_no_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.assertStat('some-name.some.counter:1|c', self.logger.increment,
'some.counter')
self.assertStat('some-name.some.counter:-1|c', self.logger.decrement,
'some.counter')
self.assertStat('some-name.some.operation:4900.0|ms',
self.logger.timing, 'some.operation', 4.9 * 1000)
self.assertStatMatches(r'some-name\.another\.operation:\d+\.\d+\|ms',
self.logger.timing_since, 'another.operation',
time.time())
self.assertStat('some-name.another.counter:42|c',
self.logger.update_stats, 'another.counter', 42)
# Each call can override the sample_rate (also, bonus prefix test)
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', r'set_statsd_prefix\(\) is deprecated')
self.logger.set_statsd_prefix('pfx')
self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.operation:4900.0|ms|@0.972',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.972)
        self.assertStatMatches(r'pfx\.another\.op:\d+\.\d+\|ms\|@0\.972',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.972)
self.assertStat('pfx.another.counter:3|c|@0.972',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.972)
# Can override sample_rate with non-keyword arg
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', r'set_statsd_prefix\(\) is deprecated')
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.939', self.logger.increment,
'some.counter', 0.939)
self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement,
'some.counter', 0.939)
self.assertStat('some.operation:4900.0|ms|@0.939',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.939)
        self.assertStatMatches(r'another\.op:\d+\.\d+\|ms\|@0\.939',
self.logger.timing_since, 'another.op',
time.time(), 0.939)
self.assertStat('another.counter:3|c|@0.939',
self.logger.update_stats, 'another.counter', 3, 0.939)
def test_delegate_methods_with_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_default_sample_rate': '0.93',
}, 'pfx')
self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment,
'some.counter')
self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement,
'some.counter')
self.assertStat('pfx.some.operation:4760.0|ms|@0.93',
self.logger.timing, 'some.operation', 4.76 * 1000)
        self.assertStatMatches(r'pfx\.another\.op:\d+\.\d+\|ms\|@0\.93',
self.logger.timing_since, 'another.op',
time.time())
self.assertStat('pfx.another.counter:3|c|@0.93',
self.logger.update_stats, 'another.counter', 3)
# Each call can override the sample_rate
self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
        self.assertStatMatches(r'pfx\.another\.op:\d+\.\d+\|ms\|@0\.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('pfx.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
# Can override sample_rate with non-keyword arg
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', r'set_statsd_prefix\(\) is deprecated')
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.987654', self.logger.increment,
'some.counter', 0.987654)
self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement,
'some.counter', 0.987654)
self.assertStat('some.operation:4900.0|ms|@0.987654',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.987654)
        self.assertStatMatches(r'another\.op:\d+\.\d+\|ms\|@0\.987654',
self.logger.timing_since, 'another.op',
time.time(), 0.987654)
self.assertStat('another.counter:3|c|@0.987654',
self.logger.update_stats, 'another.counter',
3, 0.987654)
def test_delegate_methods_with_metric_prefix(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_metric_prefix': 'alpha.beta',
}, 'pfx')
self.assertStat('alpha.beta.pfx.some.counter:1|c',
self.logger.increment, 'some.counter')
self.assertStat('alpha.beta.pfx.some.counter:-1|c',
self.logger.decrement, 'some.counter')
self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches(
r'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms',
self.logger.timing_since, 'another.op', time.time())
self.assertStat('alpha.beta.pfx.another.counter:3|c',
self.logger.update_stats, 'another.counter', 3)
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', r'set_statsd_prefix\(\) is deprecated')
self.logger.set_statsd_prefix('')
self.assertStat('alpha.beta.some.counter:1|c|@0.9912',
self.logger.increment, 'some.counter',
sample_rate=0.9912)
self.assertStat('alpha.beta.some.counter:-1|c|@0.9912',
self.logger.decrement, 'some.counter', 0.9912)
self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches(
            r'alpha\.beta\.another\.op:\d+\.\d+\|ms\|@0\.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('alpha.beta.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
@reset_logger_state
def test_thread_locals(self):
logger = utils.get_logger(None)
# test the setter
logger.thread_locals = ('id', 'ip')
self.assertEqual(logger.thread_locals, ('id', 'ip'))
# reset
logger.thread_locals = (None, None)
self.assertEqual(logger.thread_locals, (None, None))
logger.txn_id = '1234'
logger.client_ip = '1.2.3.4'
self.assertEqual(logger.thread_locals, ('1234', '1.2.3.4'))
logger.txn_id = '5678'
logger.client_ip = '5.6.7.8'
self.assertEqual(logger.thread_locals, ('5678', '5.6.7.8'))
def test_no_fdatasync(self):
called = []
class NoFdatasync(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.os', NoFdatasync()):
with patch('swift.common.utils.fsync', fsync):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_yes_fdatasync(self):
called = []
class YesFdatasync(object):
def fdatasync(self, fd):
called.append(fd)
with patch('swift.common.utils.os', YesFdatasync()):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_fsync_bad_fullsync(self):
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
raise IOError(18)
with patch('swift.common.utils.fcntl', FCNTL()):
self.assertRaises(OSError, lambda: utils.fsync(12345))
def test_fsync_f_fullsync(self):
called = []
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
called[:] = [fd, op]
return 0
with patch('swift.common.utils.fcntl', FCNTL()):
utils.fsync(12345)
self.assertEqual(called, [12345, 123])
def test_fsync_no_fullsync(self):
called = []
class FCNTL(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.fcntl', FCNTL()):
with patch('os.fsync', fsync):
utils.fsync(12345)
self.assertEqual(called, [12345])
class TestSwiftLoggerAdapter(unittest.TestCase):
@reset_logger_state
def test_thread_locals(self):
logger = utils.get_logger({}, 'foo')
adapter1 = utils.SwiftLoggerAdapter(logger, {})
adapter2 = utils.SwiftLoggerAdapter(logger, {})
locals1 = ('tx_123', '1.2.3.4')
adapter1.thread_locals = locals1
self.assertEqual(adapter1.thread_locals, locals1)
self.assertEqual(adapter2.thread_locals, locals1)
self.assertEqual(logger.thread_locals, locals1)
locals2 = ('tx_456', '1.2.3.456')
logger.thread_locals = locals2
self.assertEqual(adapter1.thread_locals, locals2)
self.assertEqual(adapter2.thread_locals, locals2)
self.assertEqual(logger.thread_locals, locals2)
logger.thread_locals = (None, None)
def test_exception(self):
# verify that the adapter routes exception calls to utils.LogAdapter
# for special case handling
logger = utils.get_logger({})
adapter = utils.SwiftLoggerAdapter(logger, {})
try:
raise OSError(errno.ECONNREFUSED, 'oserror')
except OSError:
with mock.patch('logging.LoggerAdapter.error') as mocked:
adapter.exception('Caught')
mocked.assert_called_with('Caught: Connection refused')
class TestMetricsPrefixLoggerAdapter(unittest.TestCase):
def test_metric_prefix(self):
logger = utils.get_logger({}, 'logger_name')
adapter1 = utils.MetricsPrefixLoggerAdapter(logger, {}, 'one')
adapter2 = utils.MetricsPrefixLoggerAdapter(logger, {}, 'two')
adapter3 = utils.SwiftLoggerAdapter(logger, {})
self.assertEqual('logger_name', logger.name)
self.assertEqual('logger_name', adapter1.logger.name)
self.assertEqual('logger_name', adapter2.logger.name)
self.assertEqual('logger_name', adapter3.logger.name)
with mock.patch.object(logger, 'increment') as mock_increment:
adapter1.increment('test1')
adapter2.increment('test2')
adapter3.increment('test3')
logger.increment('test')
self.assertEqual(
[mock.call('one.test1'), mock.call('two.test2'),
mock.call('test3'), mock.call('test')],
mock_increment.call_args_list)
adapter1.metric_prefix = 'not one'
with mock.patch.object(logger, 'increment') as mock_increment:
adapter1.increment('test1')
adapter2.increment('test2')
adapter3.increment('test3')
logger.increment('test')
self.assertEqual(
[mock.call('not one.test1'), mock.call('two.test2'),
mock.call('test3'), mock.call('test')],
mock_increment.call_args_list)
def test_wrapped_prefixing(self):
logger = utils.get_logger({}, 'logger_name')
adapter1 = utils.MetricsPrefixLoggerAdapter(logger, {}, 'one')
adapter2 = utils.MetricsPrefixLoggerAdapter(adapter1, {}, 'two')
self.assertEqual('logger_name', logger.name)
self.assertEqual('logger_name', adapter1.logger.name)
self.assertEqual('logger_name', adapter2.logger.name)
with mock.patch.object(logger, 'increment') as mock_increment:
adapter1.increment('test1')
adapter2.increment('test2')
logger.increment('test')
self.assertEqual(
[mock.call('one.test1'),
mock.call('one.two.test2'),
mock.call('test')],
mock_increment.call_args_list)
class TestAuditLocationGenerator(unittest.TestCase):
def test_drive_tree_access(self):
orig_listdir = utils.listdir
def _mock_utils_listdir(path):
if 'bad_part' in path:
raise OSError(errno.EACCES)
elif 'bad_suffix' in path:
raise OSError(errno.EACCES)
elif 'bad_hash' in path:
raise OSError(errno.EACCES)
else:
return orig_listdir(path)
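        # Each scenario below plants a "bad_*" entry for which the mocked
        # listdir raises EACCES and asserts that the OSError propagates out
        # of audit_location_generator.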
        # Check Raise on Bad Partition
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
obj_path = os.path.join(data, "bad_part")
with open(obj_path, "w"):
pass
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Suffix
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
obj_path = os.path.join(part1, "bad_suffix")
with open(obj_path, 'w'):
pass
suffix = os.path.join(part2, "suffix")
os.makedirs(suffix)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Hash
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
suffix = os.path.join(part1, "suffix")
os.makedirs(suffix)
hash1 = os.path.join(suffix, "hash1")
os.makedirs(hash1)
obj_path = os.path.join(suffix, "bad_hash")
with open(obj_path, 'w'):
pass
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
def test_non_dir_drive(self):
with temptree([]) as tmpdir:
logger = debug_logger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False
)
self.assertEqual(list(locations), [])
def test_mount_check_drive(self):
with temptree([]) as tmpdir:
logger = debug_logger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(2, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True
)
self.assertEqual(list(locations), [])
def test_non_dir_contents(self):
with temptree([]) as tmpdir:
logger = debug_logger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
with open(os.path.join(data, "partition1"), "w"):
pass
partition = os.path.join(data, "partition2")
os.makedirs(partition)
with open(os.path.join(partition, "suffix1"), "w"):
pass
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
with open(os.path.join(suffix, "hash1"), "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
def test_find_objects(self):
with temptree([]) as tmpdir:
expected_objs = list()
expected_dirs = list()
logger = debug_logger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
partition = os.path.join(data, "partition1")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash")
os.makedirs(hash_path)
expected_dirs.append((hash_path, 'drive', 'partition1'))
obj_path = os.path.join(hash_path, "obj1.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition1'))
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
expected_dirs.append((hash_path, 'drive', 'partition2'))
obj_path = os.path.join(hash_path, "obj2.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition2'))
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
got_objs = list(locations)
self.assertEqual(len(got_objs), len(expected_objs))
self.assertEqual(sorted(got_objs), sorted(expected_objs))
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
# check yield_hash_dirs option
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger,
yield_hash_dirs=True,
)
got_dirs = list(locations)
self.assertEqual(sorted(got_dirs), sorted(expected_dirs))
def test_ignore_metadata(self):
with temptree([]) as tmpdir:
logger = debug_logger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.dat")
with open(obj_path, "w"):
pass
meta_path = os.path.join(hash_path, "obj1.meta")
with open(meta_path, "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", ".dat", mount_check=False, logger=logger
)
self.assertEqual(list(locations),
[(obj_path, "drive", "partition2")])
def test_hooks(self):
with temptree([]) as tmpdir:
logger = debug_logger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
partition = os.path.join(data, "partition1")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix1")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash1")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.dat")
with open(obj_path, "w"):
pass
meta_path = os.path.join(hash_path, "obj1.meta")
with open(meta_path, "w"):
pass
hook_pre_device = MagicMock()
hook_post_device = MagicMock()
hook_pre_partition = MagicMock()
hook_post_partition = MagicMock()
hook_pre_suffix = MagicMock()
hook_post_suffix = MagicMock()
hook_pre_hash = MagicMock()
hook_post_hash = MagicMock()
locations = utils.audit_location_generator(
tmpdir, "data", ".dat", mount_check=False, logger=logger,
hook_pre_device=hook_pre_device,
hook_post_device=hook_post_device,
hook_pre_partition=hook_pre_partition,
hook_post_partition=hook_post_partition,
hook_pre_suffix=hook_pre_suffix,
hook_post_suffix=hook_post_suffix,
hook_pre_hash=hook_pre_hash,
hook_post_hash=hook_post_hash
)
list(locations)
hook_pre_device.assert_called_once_with(os.path.join(tmpdir,
"drive"))
hook_post_device.assert_called_once_with(os.path.join(tmpdir,
"drive"))
hook_pre_partition.assert_called_once_with(partition)
hook_post_partition.assert_called_once_with(partition)
hook_pre_suffix.assert_called_once_with(suffix)
hook_post_suffix.assert_called_once_with(suffix)
hook_pre_hash.assert_called_once_with(hash_path)
hook_post_hash.assert_called_once_with(hash_path)
def test_filters(self):
with temptree([]) as tmpdir:
logger = debug_logger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
partition = os.path.join(data, "partition1")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix1")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash1")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.dat")
with open(obj_path, "w"):
pass
meta_path = os.path.join(hash_path, "obj1.meta")
with open(meta_path, "w"):
pass
def audit_location_generator(**kwargs):
return utils.audit_location_generator(
tmpdir, "data", ".dat", mount_check=False, logger=logger,
**kwargs)
            # Patch os.listdir but keep its real behaviour (side_effect is
            # the real os.listdir) so we can assert which directories were
            # actually listed.
with patch('os.listdir', side_effect=os.listdir) as m_listdir:
# devices_filter
m_listdir.reset_mock()
devices_filter = MagicMock(return_value=["drive"])
list(audit_location_generator(devices_filter=devices_filter))
devices_filter.assert_called_once_with(tmpdir, ["drive"])
self.assertIn(((data,),), m_listdir.call_args_list)
m_listdir.reset_mock()
devices_filter = MagicMock(return_value=[])
list(audit_location_generator(devices_filter=devices_filter))
devices_filter.assert_called_once_with(tmpdir, ["drive"])
self.assertNotIn(((data,),), m_listdir.call_args_list)
# partitions_filter
m_listdir.reset_mock()
partitions_filter = MagicMock(return_value=["partition1"])
list(audit_location_generator(
partitions_filter=partitions_filter))
partitions_filter.assert_called_once_with(data,
["partition1"])
self.assertIn(((partition,),), m_listdir.call_args_list)
m_listdir.reset_mock()
partitions_filter = MagicMock(return_value=[])
list(audit_location_generator(
partitions_filter=partitions_filter))
partitions_filter.assert_called_once_with(data,
["partition1"])
self.assertNotIn(((partition,),), m_listdir.call_args_list)
# suffixes_filter
m_listdir.reset_mock()
suffixes_filter = MagicMock(return_value=["suffix1"])
list(audit_location_generator(suffixes_filter=suffixes_filter))
suffixes_filter.assert_called_once_with(partition, ["suffix1"])
self.assertIn(((suffix,),), m_listdir.call_args_list)
m_listdir.reset_mock()
suffixes_filter = MagicMock(return_value=[])
list(audit_location_generator(suffixes_filter=suffixes_filter))
suffixes_filter.assert_called_once_with(partition, ["suffix1"])
self.assertNotIn(((suffix,),), m_listdir.call_args_list)
# hashes_filter
m_listdir.reset_mock()
hashes_filter = MagicMock(return_value=["hash1"])
list(audit_location_generator(hashes_filter=hashes_filter))
hashes_filter.assert_called_once_with(suffix, ["hash1"])
self.assertIn(((hash_path,),), m_listdir.call_args_list)
m_listdir.reset_mock()
hashes_filter = MagicMock(return_value=[])
list(audit_location_generator(hashes_filter=hashes_filter))
hashes_filter.assert_called_once_with(suffix, ["hash1"])
self.assertNotIn(((hash_path,),), m_listdir.call_args_list)
@with_tempdir
def test_error_counter(self, tmpdir):
def assert_no_errors(devices, mount_check=False):
logger = debug_logger()
error_counter = {}
locations = utils.audit_location_generator(
devices, "data", mount_check=mount_check, logger=logger,
error_counter=error_counter
)
self.assertEqual([], list(locations))
self.assertEqual([], logger.get_lines_for_level('warning'))
self.assertEqual([], logger.get_lines_for_level('error'))
self.assertEqual({}, error_counter)
# no devices, no problem
devices = os.path.join(tmpdir, 'devices1')
os.makedirs(devices)
assert_no_errors(devices)
# empty dir under devices/
devices = os.path.join(tmpdir, 'devices2')
os.makedirs(devices)
dev_dir = os.path.join(devices, 'device_is_empty_dir')
os.makedirs(dev_dir)
def assert_listdir_error(devices, expected):
logger = debug_logger()
error_counter = {}
locations = utils.audit_location_generator(
devices, "data", mount_check=False, logger=logger,
error_counter=error_counter
)
self.assertEqual([], list(locations))
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
self.assertEqual({'unlistable_partitions': expected},
error_counter)
# file under devices/
devices = os.path.join(tmpdir, 'devices3')
os.makedirs(devices)
with open(os.path.join(devices, 'device_is_file'), 'w'):
pass
listdir_error_data_dir = os.path.join(devices, 'device_is_file',
'data')
assert_listdir_error(devices, [listdir_error_data_dir])
# dir under devices/
devices = os.path.join(tmpdir, 'devices4')
device = os.path.join(devices, 'device')
os.makedirs(device)
expected_datadir = os.path.join(devices, 'device', 'data')
assert_no_errors(devices)
# error for dir under devices/
orig_listdir = utils.listdir
def mocked(path):
if path.endswith('data'):
raise OSError
return orig_listdir(path)
with mock.patch('swift.common.utils.listdir', mocked):
assert_listdir_error(devices, [expected_datadir])
# mount check error
devices = os.path.join(tmpdir, 'devices5')
device = os.path.join(devices, 'device')
os.makedirs(device)
# no check
with mock.patch('swift.common.utils.ismount', return_value=False):
assert_no_errors(devices, mount_check=False)
# check passes
with mock.patch('swift.common.utils.ismount', return_value=True):
assert_no_errors(devices, mount_check=True)
# check fails
logger = debug_logger()
error_counter = {}
with mock.patch('swift.common.utils.ismount', return_value=False):
locations = utils.audit_location_generator(
devices, "data", mount_check=True, logger=logger,
error_counter=error_counter
)
self.assertEqual([], list(locations))
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
self.assertEqual({'unmounted': ['device']}, error_counter)
class TestGreenAsyncPile(unittest.TestCase):
def setUp(self):
self.timeout = Timeout(5.0)
def tearDown(self):
self.timeout.cancel()
def test_runs_everything(self):
def run_test():
tests_ran[0] += 1
return tests_ran[0]
tests_ran = [0]
pile = utils.GreenAsyncPile(3)
for x in range(3):
pile.spawn(run_test)
self.assertEqual(sorted(x for x in pile), [1, 2, 3])
def test_is_asynchronous(self):
def run_test(index):
events[index].wait()
return index
pile = utils.GreenAsyncPile(3)
for order in ((1, 2, 0), (0, 1, 2), (2, 1, 0), (0, 2, 1)):
events = [eventlet.event.Event(), eventlet.event.Event(),
eventlet.event.Event()]
for x in range(3):
pile.spawn(run_test, x)
for x in order:
events[x].send()
self.assertEqual(next(pile), x)
def test_next_when_empty(self):
def run_test():
pass
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test)
self.assertIsNone(next(pile))
self.assertRaises(StopIteration, lambda: next(pile))
def test_waitall_timeout_timesout(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 1.0)
self.assertEqual(pile.waitall(0.5), [0.1])
self.assertEqual(completed[0], 1)
def test_waitall_timeout_completes(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 0.1)
self.assertEqual(pile.waitall(0.5), [0.1, 0.1])
self.assertEqual(completed[0], 2)
def test_waitfirst_only_returns_first(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
completed = []
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 'first')
pile.spawn(run_test, 'second')
pile.spawn(run_test, 'third')
self.assertEqual(pile.waitfirst(0.5), completed[0])
# 3 still completed, but only the first was returned.
self.assertEqual(3, len(completed))
def test_wait_with_firstn(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
for first_n in [None] + list(range(6)):
completed = []
pile = utils.GreenAsyncPile(10)
for i in range(10):
pile.spawn(run_test, i)
actual = pile._wait(1, first_n)
expected_n = first_n if first_n else 10
self.assertEqual(completed[:expected_n], actual)
self.assertEqual(10, len(completed))
def test_pending(self):
pile = utils.GreenAsyncPile(3)
self.assertEqual(0, pile._pending)
for repeats in range(2):
# repeat to verify that pending will go back up after going down
for i in range(4):
pile.spawn(lambda: i)
self.assertEqual(4, pile._pending)
for i in range(3, -1, -1):
next(pile)
self.assertEqual(i, pile._pending)
# sanity check - the pile is empty
self.assertRaises(StopIteration, pile.next)
# pending remains 0
self.assertEqual(0, pile._pending)
def _exploder(self, arg):
if isinstance(arg, Exception):
raise arg
else:
return arg
def test_blocking_last_next_explodes(self):
pile = utils.GreenAsyncPile(2)
pile.spawn(self._exploder, 1)
pile.spawn(self._exploder, 2)
pile.spawn(self._exploder, Exception('kaboom'))
self.assertEqual(1, next(pile))
self.assertEqual(2, next(pile))
with mock.patch('sys.stderr', StringIO()) as mock_stderr, \
self.assertRaises(StopIteration):
next(pile)
self.assertEqual(pile.inflight, 0)
self.assertEqual(pile._pending, 0)
self.assertIn('Exception: kaboom', mock_stderr.getvalue())
self.assertIn('Traceback (most recent call last):',
mock_stderr.getvalue())
def test_no_blocking_last_next_explodes(self):
pile = utils.GreenAsyncPile(10)
pile.spawn(self._exploder, 1)
self.assertEqual(1, next(pile))
pile.spawn(self._exploder, 2)
self.assertEqual(2, next(pile))
pile.spawn(self._exploder, Exception('kaboom'))
with mock.patch('sys.stderr', StringIO()) as mock_stderr, \
self.assertRaises(StopIteration):
next(pile)
self.assertEqual(pile.inflight, 0)
self.assertEqual(pile._pending, 0)
self.assertIn('Exception: kaboom', mock_stderr.getvalue())
self.assertIn('Traceback (most recent call last):',
mock_stderr.getvalue())
def test_exceptions_in_streaming_pile(self):
with mock.patch('sys.stderr', StringIO()) as mock_stderr, \
utils.StreamingPile(2) as pile:
results = list(pile.asyncstarmap(self._exploder, [
(1,),
(Exception('kaboom'),),
(3,),
]))
self.assertEqual(results, [1, 3])
self.assertEqual(pile.inflight, 0)
self.assertEqual(pile._pending, 0)
self.assertIn('Exception: kaboom', mock_stderr.getvalue())
self.assertIn('Traceback (most recent call last):',
mock_stderr.getvalue())
def test_exceptions_at_end_of_streaming_pile(self):
with mock.patch('sys.stderr', StringIO()) as mock_stderr, \
utils.StreamingPile(2) as pile:
results = list(pile.asyncstarmap(self._exploder, [
(1,),
(2,),
(Exception('kaboom'),),
]))
self.assertEqual(results, [1, 2])
self.assertEqual(pile.inflight, 0)
self.assertEqual(pile._pending, 0)
self.assertIn('Exception: kaboom', mock_stderr.getvalue())
self.assertIn('Traceback (most recent call last):',
mock_stderr.getvalue())
class TestLRUCache(unittest.TestCase):
def test_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
_orig_math_sqrt = math.sqrt
# setup cache [0-10)
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# update cache [10-20)
for i in range(10, 20):
self.assertEqual(math.sqrt(i), f(i))
# cache size is fixed
self.assertEqual(f.size(), 10)
# validate cache [10-20)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
for i in range(10):
self.assertRaises(TypeError, f, i)
# cache unchanged
self.assertEqual(f.size(), 10)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
def test_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
_orig_math_sqrt = math.sqrt
now = time.time()
the_future = now + 31
# setup cache [0-10)
with patch('time.time', lambda: now):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate expired [0-10)
with patch('math.sqrt', new=None):
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertRaises(TypeError, f, i)
# validate repopulates [0-10)
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
# reuses cache space
self.assertEqual(f.size(), 10)
def test_set_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
self.assertEqual(2, f(4))
self.assertEqual(1, f.size())
# expire everything
f.maxtime = -1
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
self.assertRaises(TypeError, f, 4)
def test_set_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
for i in range(12):
f(i)
self.assertEqual(f.size(), 10)
f.maxsize = 4
for i in range(12):
f(i)
self.assertEqual(f.size(), 4)
class TestSpliterator(unittest.TestCase):
def test_string(self):
input_chunks = ["coun", "ter-", "b", "ra", "nch-mater",
"nit", "y-fungusy", "-nummular"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(8)), "counter-")
self.assertEqual(''.join(si.take(7)), "branch-")
self.assertEqual(''.join(si.take(10)), "maternity-")
self.assertEqual(''.join(si.take(8)), "fungusy-")
self.assertEqual(''.join(si.take(8)), "nummular")
def test_big_input_string(self):
input_chunks = ["iridium"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(2)), "ir")
self.assertEqual(''.join(si.take(1)), "i")
self.assertEqual(''.join(si.take(2)), "di")
self.assertEqual(''.join(si.take(1)), "u")
self.assertEqual(''.join(si.take(1)), "m")
def test_chunk_boundaries(self):
input_chunks = ["soylent", "green", "is", "people"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(7)), "soylent")
self.assertEqual(''.join(si.take(5)), "green")
self.assertEqual(''.join(si.take(2)), "is")
self.assertEqual(''.join(si.take(6)), "people")
def test_no_empty_strings(self):
input_chunks = ["soylent", "green", "is", "people"]
si = utils.Spliterator(input_chunks)
outputs = (list(si.take(7)) # starts and ends on chunk boundary
+ list(si.take(2)) # spans two chunks
+ list(si.take(3)) # begins but does not end chunk
+ list(si.take(2)) # ends but does not begin chunk
+ list(si.take(6))) # whole chunk + EOF
self.assertNotIn('', outputs)
def test_running_out(self):
input_chunks = ["not much"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(4)), "not ")
self.assertEqual(''.join(si.take(99)), "much") # short
self.assertEqual(''.join(si.take(4)), "")
self.assertEqual(''.join(si.take(4)), "")
def test_overlap(self):
input_chunks = ["one fish", "two fish", "red fish", "blue fish"]
si = utils.Spliterator(input_chunks)
t1 = si.take(20) # longer than first chunk
self.assertLess(len(next(t1)), 20) # it's not exhausted
t2 = si.take(20)
self.assertRaises(ValueError, next, t2)
def test_closing(self):
input_chunks = ["abcd", "efg", "hij"]
si = utils.Spliterator(input_chunks)
it = si.take(3) # shorter than first chunk
self.assertEqual(next(it), 'abc')
it.close()
self.assertEqual(list(si.take(20)), ['d', 'efg', 'hij'])
si = utils.Spliterator(input_chunks)
self.assertEqual(list(si.take(1)), ['a'])
it = si.take(1) # still shorter than first chunk
self.assertEqual(next(it), 'b')
it.close()
self.assertEqual(list(si.take(20)), ['cd', 'efg', 'hij'])
si = utils.Spliterator(input_chunks)
it = si.take(6) # longer than first chunk, shorter than first + second
self.assertEqual(next(it), 'abcd')
self.assertEqual(next(it), 'ef')
it.close()
self.assertEqual(list(si.take(20)), ['g', 'hij'])
si = utils.Spliterator(input_chunks)
self.assertEqual(list(si.take(2)), ['ab'])
it = si.take(3) # longer than rest of chunk
self.assertEqual(next(it), 'cd')
it.close()
self.assertEqual(list(si.take(20)), ['efg', 'hij'])
class TestParseContentRange(unittest.TestCase):
def test_good(self):
start, end, total = utils.parse_content_range("bytes 100-200/300")
self.assertEqual(start, 100)
self.assertEqual(end, 200)
self.assertEqual(total, 300)
def test_bad(self):
self.assertRaises(ValueError, utils.parse_content_range,
"100-300/500")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes 100-200/aardvark")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes bulbous-bouffant/4994801")
class TestParseContentDisposition(unittest.TestCase):
def test_basic_content_type(self):
name, attrs = utils.parse_content_disposition('text/plain')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {})
def test_content_type_with_charset(self):
name, attrs = utils.parse_content_disposition(
'text/plain; charset=UTF8')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {'charset': 'UTF8'})
def test_content_disposition(self):
name, attrs = utils.parse_content_disposition(
'form-data; name="somefile"; filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
def test_content_disposition_without_white_space(self):
name, attrs = utils.parse_content_disposition(
'form-data;name="somefile";filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
class TestGetExpirerContainer(unittest.TestCase):
@mock.patch.object(utils, 'hash_path', return_value=hex(101)[2:])
def test_get_expirer_container(self, mock_hash_path):
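# hash_path is mocked to return hex(101)[2:] == '65'; assuming the expirer
# scheme offsets by int(hash, 16) % 100 == 1 after rounding the timestamp
# down to the divisor, the expectations below are
# 1234 // 20 * 20 - 1 == 1219 and 1234 // 200 * 200 - 1 == 1199.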
container = utils.get_expirer_container(1234, 20, 'a', 'c', 'o')
self.assertEqual(container, '0000001219')
container = utils.get_expirer_container(1234, 200, 'a', 'c', 'o')
self.assertEqual(container, '0000001199')
class TestIterMultipartMimeDocuments(unittest.TestCase):
def test_bad_start(self):
it = utils.iter_multipart_mime_documents(BytesIO(b'blah'), b'unique')
exc = None
try:
next(it)
except MimeInvalid as err:
exc = err
self.assertTrue('invalid starting boundary' in str(exc))
self.assertTrue('--unique' in str(exc))
def test_empty(self):
it = utils.iter_multipart_mime_documents(BytesIO(b'--unique'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'')
self.assertRaises(StopIteration, next, it)
def test_basic(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique--'), b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'abcdefg')
self.assertRaises(StopIteration, next, it)
def test_basic2(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'abcdefg')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_tiny_reads(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(2), b'ab')
self.assertEqual(fp.read(2), b'cd')
self.assertEqual(fp.read(2), b'ef')
self.assertEqual(fp.read(2), b'g')
self.assertEqual(fp.read(2), b'')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_big_reads(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(65536), b'abcdefg')
self.assertEqual(fp.read(), b'')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_leading_crlfs(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'\r\n\r\n\r\n--unique\r\nabcdefg\r\n'
b'--unique\r\nhijkl\r\n--unique--'),
b'unique')
fp = next(it)
self.assertEqual(fp.read(65536), b'abcdefg')
self.assertEqual(fp.read(), b'')
fp = next(it)
self.assertEqual(fp.read(), b'hijkl')
self.assertRaises(StopIteration, next, it)
def test_broken_mid_stream(self):
# We go ahead and accept whatever is sent instead of rejecting the
# whole request, in case the partial form is still useful.
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nabc'), b'unique')
fp = next(it)
self.assertEqual(fp.read(), b'abc')
self.assertRaises(StopIteration, next, it)
def test_readline(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n'
b'jkl\r\n\r\n--unique--'), b'unique')
fp = next(it)
self.assertEqual(fp.readline(), b'ab\r\n')
self.assertEqual(fp.readline(), b'cd\ref\ng')
self.assertEqual(fp.readline(), b'')
fp = next(it)
self.assertEqual(fp.readline(), b'hi\r\n')
self.assertEqual(fp.readline(), b'\r\n')
self.assertEqual(fp.readline(), b'jkl\r\n')
self.assertRaises(StopIteration, next, it)
def test_readline_with_tiny_chunks(self):
it = utils.iter_multipart_mime_documents(
BytesIO(b'--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n'
b'\r\njkl\r\n\r\n--unique--'),
b'unique',
read_chunk_size=2)
fp = next(it)
self.assertEqual(fp.readline(), b'ab\r\n')
self.assertEqual(fp.readline(), b'cd\ref\ng')
self.assertEqual(fp.readline(), b'')
fp = next(it)
self.assertEqual(fp.readline(), b'hi\r\n')
self.assertEqual(fp.readline(), b'\r\n')
self.assertEqual(fp.readline(), b'jkl\r\n')
self.assertRaises(StopIteration, next, it)
class TestParseMimeHeaders(unittest.TestCase):
def test_parse_mime_headers(self):
doc_file = BytesIO(b"""Content-Disposition: form-data; name="file_size"
Foo: Bar
NOT-title-cAsED: quux
Connexion: =?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=
Status: =?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=
Latin-1: Resincronizaci\xf3n realizada con \xe9xito
Utf-8: \xd0\xba\xd0\xbe\xd0\xbd\xd1\x82\xd0\xb5\xd0\xb9\xd0\xbd\xd0\xb5\xd1\x80
This is the body
""")
headers = utils.parse_mime_headers(doc_file)
utf8 = u'\u043a\u043e\u043d\u0442\u0435\u0439\u043d\u0435\u0440'
if six.PY2:
utf8 = utf8.encode('utf-8')
expected_headers = {
'Content-Disposition': 'form-data; name="file_size"',
'Foo': "Bar",
'Not-Title-Cased': "quux",
# Encoded-word or non-ASCII values are treated just like any other
# bytestring (at least for now)
'Connexion': "=?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=",
'Status': "=?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=",
'Latin-1': "Resincronizaci\xf3n realizada con \xe9xito",
'Utf-8': utf8,
}
self.assertEqual(expected_headers, headers)
self.assertEqual(b"This is the body\n", doc_file.read())
class FakeResponse(object):
def __init__(self, status, headers, body):
self.status = status
self.headers = HeaderKeyDict(headers)
self.body = BytesIO(body)
def getheader(self, header_name):
return str(self.headers.get(header_name, ''))
def getheaders(self):
return self.headers.items()
def read(self, length=None):
return self.body.read(length)
def readline(self, length=None):
return self.body.readline(length)
class TestDocumentItersToHTTPResponseBody(unittest.TestCase):
def test_no_parts(self):
body = utils.document_iters_to_http_response_body(
iter([]), 'dontcare',
multipart=False, logger=debug_logger())
self.assertEqual(body, '')
def test_single_part(self):
body = b"time flies like an arrow; fruit flies like a banana"
doc_iters = [{'part_iter': iter(BytesIO(body).read, b'')}]
resp_body = b''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), b'dontcare',
multipart=False, logger=debug_logger()))
self.assertEqual(resp_body, body)
def test_multiple_parts(self):
part1 = b"two peanuts were walking down a railroad track"
part2 = b"and one was a salted. ... peanut."
doc_iters = [{
'start_byte': 88,
'end_byte': 133,
'content_type': 'application/peanut',
'entity_length': 1024,
'part_iter': iter(BytesIO(part1).read, b''),
}, {
'start_byte': 500,
'end_byte': 532,
'content_type': 'application/salted',
'entity_length': 1024,
'part_iter': iter(BytesIO(part2).read, b''),
}]
resp_body = b''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), b'boundaryboundary',
multipart=True, logger=debug_logger()))
self.assertEqual(resp_body, (
b"--boundaryboundary\r\n" +
# This is a little too strict; we don't actually care that the
# headers are in this order, but the test is much more legible
# this way.
b"Content-Type: application/peanut\r\n" +
b"Content-Range: bytes 88-133/1024\r\n" +
b"\r\n" +
part1 + b"\r\n" +
b"--boundaryboundary\r\n"
b"Content-Type: application/salted\r\n" +
b"Content-Range: bytes 500-532/1024\r\n" +
b"\r\n" +
part2 + b"\r\n" +
b"--boundaryboundary--"))
def test_closed_part_iterator(self):
useful_iter_mock = mock.MagicMock()
useful_iter_mock.__iter__.return_value = ['']
body_iter = utils.document_iters_to_http_response_body(
iter([{'part_iter': useful_iter_mock}]), 'dontcare',
multipart=False, logger=debug_logger())
body = ''
for s in body_iter:
body += s
self.assertEqual(body, '')
useful_iter_mock.close.assert_called_once_with()
# Calling "close" on the mock will now raise an AttributeError
del useful_iter_mock.close
body_iter = utils.document_iters_to_http_response_body(
iter([{'part_iter': useful_iter_mock}]), 'dontcare',
multipart=False, logger=debug_logger())
body = ''
for s in body_iter:
body += s
class TestPairs(unittest.TestCase):
def test_pairs(self):
items = [10, 20, 30, 40, 50, 60]
got_pairs = set(utils.pairs(items))
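# 6 items give C(6, 2) == 15 unordered pairs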
self.assertEqual(got_pairs,
set([(10, 20), (10, 30), (10, 40), (10, 50), (10, 60),
(20, 30), (20, 40), (20, 50), (20, 60),
(30, 40), (30, 50), (30, 60),
(40, 50), (40, 60),
(50, 60)]))
class TestSocketStringParser(unittest.TestCase):
def test_socket_string_parser(self):
default = 1337
addrs = [('1.2.3.4', '1.2.3.4', default),
('1.2.3.4:5000', '1.2.3.4', 5000),
('[dead:beef::1]', 'dead:beef::1', default),
('[dead:beef::1]:5000', 'dead:beef::1', 5000),
('example.com', 'example.com', default),
('example.com:5000', 'example.com', 5000),
('foo.1-2-3.bar.com:5000', 'foo.1-2-3.bar.com', 5000),
('1.2.3.4:10:20', None, None),
('dead:beef::1:5000', None, None)]
for addr, expected_host, expected_port in addrs:
if expected_host:
host, port = utils.parse_socket_string(addr, default)
self.assertEqual(expected_host, host)
self.assertEqual(expected_port, int(port))
else:
with self.assertRaises(ValueError):
utils.parse_socket_string(addr, default)
class TestHashForFileFunction(unittest.TestCase):
def setUp(self):
self.tempfilename = tempfile.mktemp()
def tearDown(self):
try:
os.unlink(self.tempfilename)
except OSError:
pass
def test_hash_for_file_smallish(self):
stub_data = b'some data'
with open(self.tempfilename, 'wb') as fd:
fd.write(stub_data)
with mock.patch('swift.common.utils.md5') as mock_md5:
mock_hasher = mock_md5.return_value
rv = utils.md5_hash_for_file(self.tempfilename)
self.assertTrue(mock_hasher.hexdigest.called)
self.assertEqual(rv, mock_hasher.hexdigest.return_value)
self.assertEqual([mock.call(stub_data)],
mock_hasher.update.call_args_list)
def test_hash_for_file_big(self):
num_blocks = 10
block_size = utils.MD5_BLOCK_READ_BYTES
truncate = 523
start_char = ord('a')
expected_blocks = [chr(i).encode('utf8') * block_size
for i in range(start_char, start_char + num_blocks)]
full_data = b''.join(expected_blocks)
trimmed_data = full_data[:-truncate]
# sanity
self.assertEqual(len(trimmed_data), block_size * num_blocks - truncate)
with open(self.tempfilename, 'wb') as fd:
fd.write(trimmed_data)
with mock.patch('swift.common.utils.md5') as mock_md5:
mock_hasher = mock_md5.return_value
rv = utils.md5_hash_for_file(self.tempfilename)
self.assertTrue(mock_hasher.hexdigest.called)
self.assertEqual(rv, mock_hasher.hexdigest.return_value)
self.assertEqual(num_blocks, len(mock_hasher.update.call_args_list))
found_blocks = []
for i, (expected_block, call) in enumerate(zip(
expected_blocks, mock_hasher.update.call_args_list)):
args, kwargs = call
self.assertEqual(kwargs, {})
self.assertEqual(1, len(args))
block = args[0]
if i < num_blocks - 1:
self.assertEqual(block, expected_block)
else:
self.assertEqual(block, expected_block[:-truncate])
found_blocks.append(block)
self.assertEqual(b''.join(found_blocks), trimmed_data)
def test_hash_for_file_empty(self):
with open(self.tempfilename, 'wb'):
pass
with mock.patch('swift.common.utils.md5') as mock_md5:
mock_hasher = mock_md5.return_value
rv = utils.md5_hash_for_file(self.tempfilename)
self.assertTrue(mock_hasher.hexdigest.called)
self.assertIs(rv, mock_hasher.hexdigest.return_value)
self.assertEqual([], mock_hasher.update.call_args_list)
def test_hash_for_file_brittle(self):
data_to_expected_hash = {
b'': 'd41d8cd98f00b204e9800998ecf8427e',
b'some data': '1e50210a0202497fb79bc38b6ade6c34',
(b'a' * 4096 * 10)[:-523]: '06a41551609656c85f14f659055dc6d3',
}
# unlike some other places, where the concrete implementation really
# matters for backwards compatibility, these brittle tests are probably
# not needed or justified; if a future maintainer rips them out later
# they're probably doing the right thing
failures = []
for stub_data, expected_hash in data_to_expected_hash.items():
with open(self.tempfilename, 'wb') as fd:
fd.write(stub_data)
rv = utils.md5_hash_for_file(self.tempfilename)
try:
self.assertEqual(expected_hash, rv)
except AssertionError:
trim_cap = 80
if len(stub_data) > trim_cap:
stub_data = '%s...<truncated>' % stub_data[:trim_cap]
failures.append('hash for %r was %s instead of expected %s' % (
stub_data, rv, expected_hash))
if failures:
self.fail('Some data did not compute expected hash:\n' +
'\n'.join(failures))
class TestFsHasFreeSpace(unittest.TestCase):
def test_bytes(self):
fake_result = posix.statvfs_result([
4096, # f_bsize
4096, # f_frsize
2854907, # f_blocks
1984802, # f_bfree (free blocks for root)
1728089, # f_bavail (free blocks for non-root)
1280000, # f_files
1266040, # f_ffree,
1266040, # f_favail,
4096, # f_flag
255, # f_namemax
])
with mock.patch('os.statvfs', return_value=fake_result):
self.assertTrue(utils.fs_has_free_space("/", 0, False))
self.assertTrue(utils.fs_has_free_space("/", 1, False))
# free space left = f_bavail * f_bsize = 7078252544
self.assertTrue(utils.fs_has_free_space("/", 7078252544, False))
self.assertFalse(utils.fs_has_free_space("/", 7078252545, False))
self.assertFalse(utils.fs_has_free_space("/", 2 ** 64, False))
def test_percent(self):
fake_result = posix.statvfs_result([
4096, # f_bsize
4096, # f_frsize
2854907, # f_blocks
1984802, # f_bfree (free blocks for root)
1728089, # f_bavail (free blocks for non-root)
1280000, # f_files
1266040, # f_ffree,
1266040, # f_favail,
4096, # f_flag
255, # f_namemax
])
with mock.patch('os.statvfs', return_value=fake_result):
self.assertTrue(utils.fs_has_free_space("/", 0, True))
self.assertTrue(utils.fs_has_free_space("/", 1, True))
# percentage of free space for the faked statvfs is 60%
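# (assuming the percentage is f_bavail / f_blocks: 1728089 / 2854907 is
# roughly 60.5%, so asking for 60% passes and 61% fails)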
self.assertTrue(utils.fs_has_free_space("/", 60, True))
self.assertFalse(utils.fs_has_free_space("/", 61, True))
self.assertFalse(utils.fs_has_free_space("/", 100, True))
self.assertFalse(utils.fs_has_free_space("/", 110, True))
class TestSetSwiftDir(unittest.TestCase):
def setUp(self):
self.swift_dir = tempfile.mkdtemp()
self.swift_conf = os.path.join(self.swift_dir, 'swift.conf')
self.policy_name = ''.join(random.sample(string.ascii_letters, 20))
with open(self.swift_conf, "wt") as sc:
sc.write('''
[swift-hash]
swift_hash_path_suffix = changeme
[storage-policy:0]
name = default
default = yes
[storage-policy:1]
name = %s
''' % self.policy_name)
def tearDown(self):
shutil.rmtree(self.swift_dir, ignore_errors=True)
def test_set_swift_dir(self):
set_swift_dir(None)
reload_storage_policies()
self.assertIsNone(POLICIES.get_by_name(self.policy_name))
set_swift_dir(self.swift_dir)
reload_storage_policies()
self.assertIsNotNone(POLICIES.get_by_name(self.policy_name))
class TestPipeMutex(unittest.TestCase):
def setUp(self):
self.mutex = utils.PipeMutex()
def tearDown(self):
self.mutex.close()
def test_nonblocking(self):
evt_lock1 = eventlet.event.Event()
evt_lock2 = eventlet.event.Event()
evt_unlock = eventlet.event.Event()
def get_the_lock():
self.mutex.acquire()
evt_lock1.send('got the lock')
evt_lock2.wait()
self.mutex.release()
evt_unlock.send('released the lock')
eventlet.spawn(get_the_lock)
evt_lock1.wait() # Now, the other greenthread has the lock.
self.assertFalse(self.mutex.acquire(blocking=False))
evt_lock2.send('please release the lock')
evt_unlock.wait() # The other greenthread has released the lock.
self.assertTrue(self.mutex.acquire(blocking=False))
def test_recursive(self):
self.assertTrue(self.mutex.acquire(blocking=False))
self.assertTrue(self.mutex.acquire(blocking=False))
def try_acquire_lock():
return self.mutex.acquire(blocking=False)
self.assertFalse(eventlet.spawn(try_acquire_lock).wait())
self.mutex.release()
self.assertFalse(eventlet.spawn(try_acquire_lock).wait())
self.mutex.release()
self.assertTrue(eventlet.spawn(try_acquire_lock).wait())
def test_release_without_acquire(self):
self.assertRaises(RuntimeError, self.mutex.release)
def test_too_many_releases(self):
self.mutex.acquire()
self.mutex.release()
self.assertRaises(RuntimeError, self.mutex.release)
def test_wrong_releaser(self):
self.mutex.acquire()
with quiet_eventlet_exceptions():
self.assertRaises(RuntimeError,
eventlet.spawn(self.mutex.release).wait)
def test_blocking(self):
evt = eventlet.event.Event()
sequence = []
def coro1():
eventlet.sleep(0) # let coro2 go
self.mutex.acquire()
sequence.append('coro1 acquire')
evt.send('go')
self.mutex.release()
sequence.append('coro1 release')
def coro2():
evt.wait() # wait for coro1 to start us
self.mutex.acquire()
sequence.append('coro2 acquire')
self.mutex.release()
sequence.append('coro2 release')
c1 = eventlet.spawn(coro1)
c2 = eventlet.spawn(coro2)
c1.wait()
c2.wait()
self.assertEqual(sequence, [
'coro1 acquire',
'coro1 release',
'coro2 acquire',
'coro2 release'])
def test_blocking_tpool(self):
# Note: this test's success isn't a guarantee that the mutex is
# working. However, this test's failure means that the mutex is
# definitely broken.
sequence = []
def do_stuff():
n = 10
while n > 0:
self.mutex.acquire()
sequence.append("<")
eventlet.sleep(0.0001)
sequence.append(">")
self.mutex.release()
n -= 1
greenthread1 = eventlet.spawn(do_stuff)
greenthread2 = eventlet.spawn(do_stuff)
real_thread1 = eventlet.patcher.original('threading').Thread(
target=do_stuff)
real_thread1.start()
real_thread2 = eventlet.patcher.original('threading').Thread(
target=do_stuff)
real_thread2.start()
greenthread1.wait()
greenthread2.wait()
real_thread1.join()
real_thread2.join()
self.assertEqual(''.join(sequence), "<>" * 40)
def test_blocking_preserves_ownership(self):
pthread1_event = eventlet.patcher.original('threading').Event()
pthread2_event1 = eventlet.patcher.original('threading').Event()
pthread2_event2 = eventlet.patcher.original('threading').Event()
thread_id = []
owner = []
def pthread1():
thread_id.append(id(eventlet.greenthread.getcurrent()))
self.mutex.acquire()
owner.append(self.mutex.owner)
pthread2_event1.set()
orig_os_write = utils.os.write
def patched_os_write(*a, **kw):
try:
return orig_os_write(*a, **kw)
finally:
pthread1_event.wait()
with mock.patch.object(utils.os, 'write', patched_os_write):
self.mutex.release()
pthread2_event2.set()
def pthread2():
pthread2_event1.wait() # ensure pthread1 acquires lock first
thread_id.append(id(eventlet.greenthread.getcurrent()))
self.mutex.acquire()
pthread1_event.set()
pthread2_event2.wait()
owner.append(self.mutex.owner)
self.mutex.release()
real_thread1 = eventlet.patcher.original('threading').Thread(
target=pthread1)
real_thread1.start()
real_thread2 = eventlet.patcher.original('threading').Thread(
target=pthread2)
real_thread2.start()
real_thread1.join()
real_thread2.join()
self.assertEqual(thread_id, owner)
self.assertIsNone(self.mutex.owner)
@classmethod
def tearDownClass(cls):
# PipeMutex turns this off when you instantiate one
eventlet.debug.hub_prevent_multiple_readers(True)
class TestDistributeEvenly(unittest.TestCase):
def test_evenly_divided(self):
out = utils.distribute_evenly(range(12), 3)
self.assertEqual(out, [
[0, 3, 6, 9],
[1, 4, 7, 10],
[2, 5, 8, 11],
])
out = utils.distribute_evenly(range(12), 4)
self.assertEqual(out, [
[0, 4, 8],
[1, 5, 9],
[2, 6, 10],
[3, 7, 11],
])
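# both layouts are consistent with a round-robin deal: item i lands in
# bucket i % num_buckets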
def test_uneven(self):
out = utils.distribute_evenly(range(11), 3)
self.assertEqual(out, [
[0, 3, 6, 9],
[1, 4, 7, 10],
[2, 5, 8],
])
def test_just_one(self):
out = utils.distribute_evenly(range(5), 1)
self.assertEqual(out, [[0, 1, 2, 3, 4]])
def test_more_buckets_than_items(self):
out = utils.distribute_evenly(range(5), 7)
self.assertEqual(out, [[0], [1], [2], [3], [4], [], []])
class TestShardName(unittest.TestCase):
def test(self):
ts = utils.Timestamp.now()
created = utils.ShardName.create('a', 'root', 'parent', ts, 1)
parent_hash = md5(b'parent', usedforsecurity=False).hexdigest()
expected = 'a/root-%s-%s-1' % (parent_hash, ts.internal)
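# i.e. shard containers are named <root>-<md5(parent)>-<timestamp>-<index>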
actual = str(created)
self.assertEqual(expected, actual)
parsed = utils.ShardName.parse(actual)
# normally a ShardName's account will carry the .shards_ prefix
self.assertEqual('a', parsed.account)
self.assertEqual('root', parsed.root_container)
self.assertEqual(parent_hash, parsed.parent_container_hash)
self.assertEqual(ts, parsed.timestamp)
self.assertEqual(1, parsed.index)
self.assertEqual(actual, str(parsed))
def test_root_has_hyphens(self):
parsed = utils.ShardName.parse(
'a/root-has-some-hyphens-hash-1234-99')
self.assertEqual('a', parsed.account)
self.assertEqual('root-has-some-hyphens', parsed.root_container)
self.assertEqual('hash', parsed.parent_container_hash)
self.assertEqual(utils.Timestamp(1234), parsed.timestamp)
self.assertEqual(99, parsed.index)
def test_realistic_shard_range_names(self):
parsed = utils.ShardName.parse(
'.shards_a1/r1-'
'7c92cf1eee8d99cc85f8355a3d6e4b86-'
'1662475499.00000-1')
self.assertEqual('.shards_a1', parsed.account)
self.assertEqual('r1', parsed.root_container)
self.assertEqual('7c92cf1eee8d99cc85f8355a3d6e4b86',
parsed.parent_container_hash)
self.assertEqual(utils.Timestamp(1662475499), parsed.timestamp)
self.assertEqual(1, parsed.index)
parsed = utils.ShardName('.shards_a', 'c', 'hash',
utils.Timestamp(1234), 42)
self.assertEqual(
'.shards_a/c-hash-0000001234.00000-42',
str(parsed))
parsed = utils.ShardName.create('.shards_a', 'c', 'c',
utils.Timestamp(1234), 42)
self.assertEqual(
'.shards_a/c-4a8a08f09d37b73795649038408b5f33-0000001234.00000-42',
str(parsed))
def test_bad_parse(self):
with self.assertRaises(ValueError) as cm:
utils.ShardName.parse('a')
self.assertEqual('invalid name: a', str(cm.exception))
with self.assertRaises(ValueError) as cm:
utils.ShardName.parse('a/c')
self.assertEqual('invalid name: a/c', str(cm.exception))
with self.assertRaises(ValueError) as cm:
utils.ShardName.parse('a/root-hash-bad')
self.assertEqual('invalid name: a/root-hash-bad', str(cm.exception))
with self.assertRaises(ValueError) as cm:
utils.ShardName.parse('a/root-hash-bad-0')
self.assertEqual('invalid name: a/root-hash-bad-0',
str(cm.exception))
with self.assertRaises(ValueError) as cm:
utils.ShardName.parse('a/root-hash-12345678.12345-bad')
self.assertEqual('invalid name: a/root-hash-12345678.12345-bad',
str(cm.exception))
def test_bad_create(self):
with self.assertRaises(ValueError):
utils.ShardName.create('a', 'root', 'hash', 'bad', '0')
with self.assertRaises(ValueError):
utils.ShardName.create('a', 'root', None, '1235678', 'bad')
class TestNamespace(unittest.TestCase):
def test_lower_setter(self):
ns = utils.Namespace('a/c', 'b', '')
# sanity checks
self.assertEqual('b', ns.lower_str)
self.assertEqual(ns.MAX, ns.upper)
def do_test(good_value, expected):
ns.lower = good_value
self.assertEqual(expected, ns.lower)
self.assertEqual(ns.MAX, ns.upper)
do_test(utils.Namespace.MIN, utils.Namespace.MIN)
do_test(utils.Namespace.MAX, utils.Namespace.MAX)
do_test(b'', utils.Namespace.MIN)
do_test(u'', utils.Namespace.MIN)
do_test(None, utils.Namespace.MIN)
do_test(b'a', 'a')
do_test(b'y', 'y')
do_test(u'a', 'a')
do_test(u'y', 'y')
expected = u'\N{SNOWMAN}'
if six.PY2:
expected = expected.encode('utf-8')
with warnings.catch_warnings(record=True) as captured_warnings:
do_test(u'\N{SNOWMAN}', expected)
do_test(u'\N{SNOWMAN}'.encode('utf-8'), expected)
self.assertFalse(captured_warnings)
ns = utils.Namespace('a/c', 'b', 'y')
ns.lower = ''
self.assertEqual(ns.MIN, ns.lower)
ns = utils.Namespace('a/c', 'b', 'y')
with self.assertRaises(ValueError) as cm:
ns.lower = 'z'
self.assertIn("must be less than or equal to upper", str(cm.exception))
self.assertEqual('b', ns.lower_str)
self.assertEqual('y', ns.upper_str)
def do_test(bad_value):
with self.assertRaises(TypeError) as cm:
ns.lower = bad_value
self.assertIn("lower must be a string", str(cm.exception))
self.assertEqual('b', ns.lower_str)
self.assertEqual('y', ns.upper_str)
do_test(1)
do_test(1.234)
def test_upper_setter(self):
ns = utils.Namespace('a/c', '', 'y')
# sanity checks
self.assertEqual(ns.MIN, ns.lower)
self.assertEqual('y', ns.upper_str)
def do_test(good_value, expected):
ns.upper = good_value
self.assertEqual(expected, ns.upper)
self.assertEqual(ns.MIN, ns.lower)
do_test(utils.Namespace.MIN, utils.Namespace.MIN)
do_test(utils.Namespace.MAX, utils.Namespace.MAX)
do_test(b'', utils.Namespace.MAX)
do_test(u'', utils.Namespace.MAX)
do_test(None, utils.Namespace.MAX)
do_test(b'z', 'z')
do_test(b'b', 'b')
do_test(u'z', 'z')
do_test(u'b', 'b')
expected = u'\N{SNOWMAN}'
if six.PY2:
expected = expected.encode('utf-8')
with warnings.catch_warnings(record=True) as captured_warnings:
do_test(u'\N{SNOWMAN}', expected)
do_test(u'\N{SNOWMAN}'.encode('utf-8'), expected)
self.assertFalse(captured_warnings)
ns = utils.Namespace('a/c', 'b', 'y')
ns.upper = ''
self.assertEqual(ns.MAX, ns.upper)
ns = utils.Namespace('a/c', 'b', 'y')
with self.assertRaises(ValueError) as cm:
ns.upper = 'a'
self.assertIn(
"must be greater than or equal to lower",
str(cm.exception))
self.assertEqual('b', ns.lower_str)
self.assertEqual('y', ns.upper_str)
def do_test(bad_value):
with self.assertRaises(TypeError) as cm:
ns.upper = bad_value
self.assertIn("upper must be a string", str(cm.exception))
self.assertEqual('b', ns.lower_str)
self.assertEqual('y', ns.upper_str)
do_test(1)
do_test(1.234)
def test_end_marker(self):
ns = utils.Namespace('a/c', '', 'y')
self.assertEqual('y\x00', ns.end_marker)
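# a trailing NUL is appended to the upper bound, presumably so a listing
# up to end_marker still includes names equal to the upper bound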
ns = utils.Namespace('a/c', '', '')
self.assertEqual('', ns.end_marker)
def test_bounds_serialization(self):
ns = utils.Namespace('a/c', None, None)
self.assertEqual('a/c', ns.name)
self.assertEqual(utils.Namespace.MIN, ns.lower)
self.assertEqual('', ns.lower_str)
self.assertEqual(utils.Namespace.MAX, ns.upper)
self.assertEqual('', ns.upper_str)
self.assertEqual('', ns.end_marker)
lower = u'\u00e4'
upper = u'\u00fb'
ns = utils.Namespace('a/%s-%s' % (lower, upper), lower, upper)
exp_lower = lower
exp_upper = upper
if six.PY2:
exp_lower = exp_lower.encode('utf-8')
exp_upper = exp_upper.encode('utf-8')
self.assertEqual(exp_lower, ns.lower)
self.assertEqual(exp_lower, ns.lower_str)
self.assertEqual(exp_upper, ns.upper)
self.assertEqual(exp_upper, ns.upper_str)
self.assertEqual(exp_upper + '\x00', ns.end_marker)
def test_entire_namespace(self):
# test entire range (no boundaries)
entire = utils.Namespace('a/test', None, None)
self.assertEqual(utils.Namespace.MAX, entire.upper)
self.assertEqual(utils.Namespace.MIN, entire.lower)
self.assertIs(True, entire.entire_namespace())
for x in range(100):
self.assertTrue(str(x) in entire)
self.assertTrue(chr(x) in entire)
for x in ('a', 'z', 'zzzz', '124fsdf', u'\u00e4'):
self.assertTrue(x in entire, '%r should be in %r' % (x, entire))
entire.lower = 'a'
self.assertIs(False, entire.entire_namespace())
def test_comparisons(self):
# upper (if provided) *must* be greater than lower
with self.assertRaises(ValueError):
utils.Namespace('f-a', 'f', 'a')
# test basic boundaries
btoc = utils.Namespace('a/b-c', 'b', 'c')
atof = utils.Namespace('a/a-f', 'a', 'f')
ftol = utils.Namespace('a/f-l', 'f', 'l')
ltor = utils.Namespace('a/l-r', 'l', 'r')
rtoz = utils.Namespace('a/r-z', 'r', 'z')
lower = utils.Namespace('a/lower', '', 'mid')
upper = utils.Namespace('a/upper', 'mid', '')
entire = utils.Namespace('a/test', None, None)
# overlapping ranges
dtof = utils.Namespace('a/d-f', 'd', 'f')
dtom = utils.Namespace('a/d-m', 'd', 'm')
# test range > and <
# non-adjacent
self.assertFalse(rtoz < atof)
self.assertTrue(atof < ltor)
self.assertTrue(ltor > atof)
self.assertFalse(ftol > rtoz)
# adjacent
self.assertFalse(rtoz < ltor)
self.assertTrue(ltor < rtoz)
self.assertFalse(ltor > rtoz)
self.assertTrue(rtoz > ltor)
# wholly within
self.assertFalse(btoc < atof)
self.assertFalse(btoc > atof)
self.assertFalse(atof < btoc)
self.assertFalse(atof > btoc)
self.assertFalse(atof < dtof)
self.assertFalse(dtof > atof)
self.assertFalse(atof > dtof)
self.assertFalse(dtof < atof)
self.assertFalse(dtof < dtom)
self.assertFalse(dtof > dtom)
self.assertFalse(dtom > dtof)
self.assertFalse(dtom < dtof)
# overlaps
self.assertFalse(atof < dtom)
self.assertFalse(atof > dtom)
self.assertFalse(ltor > dtom)
# ranges including min/max bounds
self.assertTrue(upper > lower)
self.assertTrue(lower < upper)
self.assertFalse(upper < lower)
self.assertFalse(lower > upper)
self.assertFalse(lower < entire)
self.assertFalse(entire > lower)
self.assertFalse(lower > entire)
self.assertFalse(entire < lower)
self.assertFalse(upper < entire)
self.assertFalse(entire > upper)
self.assertFalse(upper > entire)
self.assertFalse(entire < upper)
self.assertFalse(entire < entire)
self.assertFalse(entire > entire)
# test range < and > to an item
# a range is > lower and <= upper, so the lower boundary isn't
# actually included
self.assertTrue(ftol > 'f')
self.assertFalse(atof < 'f')
self.assertTrue(ltor < 'y')
self.assertFalse(ftol < 'f')
self.assertFalse(atof > 'f')
self.assertFalse(ltor > 'y')
self.assertTrue('f' < ftol)
self.assertFalse('f' > atof)
self.assertTrue('y' > ltor)
self.assertFalse('f' > ftol)
self.assertFalse('f' < atof)
self.assertFalse('y' < ltor)
# Now test ranges with only 1 boundary
start_to_l = utils.Namespace('a/None-l', '', 'l')
l_to_end = utils.Namespace('a/l-None', 'l', '')
for x in ('l', 'm', 'z', 'zzz1231sd'):
if x == 'l':
self.assertFalse(x in l_to_end)
self.assertFalse(start_to_l < x)
self.assertFalse(x > start_to_l)
else:
self.assertTrue(x in l_to_end)
self.assertTrue(start_to_l < x)
self.assertTrue(x > start_to_l)
# Now test some of the range to range checks with missing boundaries
self.assertFalse(atof < start_to_l)
self.assertFalse(start_to_l < entire)
# Now test Namespace.overlaps(other)
self.assertTrue(atof.overlaps(atof))
self.assertFalse(atof.overlaps(ftol))
self.assertFalse(ftol.overlaps(atof))
self.assertTrue(atof.overlaps(dtof))
self.assertTrue(dtof.overlaps(atof))
self.assertFalse(dtof.overlaps(ftol))
self.assertTrue(dtom.overlaps(ftol))
self.assertTrue(ftol.overlaps(dtom))
self.assertFalse(start_to_l.overlaps(l_to_end))
def test_contains(self):
lower = utils.Namespace('a/-h', '', 'h')
mid = utils.Namespace('a/h-p', 'h', 'p')
upper = utils.Namespace('a/p-', 'p', '')
entire = utils.Namespace('a/all', '', '')
self.assertTrue('a' in entire)
self.assertTrue('x' in entire)
# the empty string is not a valid object name, so it cannot be in any
# range
self.assertFalse('' in lower)
self.assertFalse('' in upper)
self.assertFalse('' in entire)
self.assertTrue('a' in lower)
self.assertTrue('h' in lower)
self.assertFalse('i' in lower)
self.assertFalse('h' in mid)
self.assertTrue('p' in mid)
self.assertFalse('p' in upper)
self.assertTrue('x' in upper)
self.assertIn(utils.Namespace.MAX, entire)
self.assertNotIn(utils.Namespace.MAX, lower)
self.assertIn(utils.Namespace.MAX, upper)
# lower bound is excluded so MIN cannot be in any range.
self.assertNotIn(utils.Namespace.MIN, entire)
self.assertNotIn(utils.Namespace.MIN, upper)
self.assertNotIn(utils.Namespace.MIN, lower)
def test_includes(self):
_to_h = utils.Namespace('a/-h', '', 'h')
d_to_t = utils.Namespace('a/d-t', 'd', 't')
d_to_k = utils.Namespace('a/d-k', 'd', 'k')
e_to_l = utils.Namespace('a/e-l', 'e', 'l')
k_to_t = utils.Namespace('a/k-t', 'k', 't')
p_to_ = utils.Namespace('a/p-', 'p', '')
t_to_ = utils.Namespace('a/t-', 't', '')
entire = utils.Namespace('a/all', '', '')
self.assertTrue(entire.includes(entire))
self.assertTrue(d_to_t.includes(d_to_t))
self.assertTrue(_to_h.includes(_to_h))
self.assertTrue(p_to_.includes(p_to_))
self.assertTrue(entire.includes(_to_h))
self.assertTrue(entire.includes(d_to_t))
self.assertTrue(entire.includes(p_to_))
self.assertTrue(d_to_t.includes(d_to_k))
self.assertTrue(d_to_t.includes(e_to_l))
self.assertTrue(d_to_t.includes(k_to_t))
self.assertTrue(p_to_.includes(t_to_))
self.assertFalse(_to_h.includes(d_to_t))
self.assertFalse(p_to_.includes(d_to_t))
self.assertFalse(k_to_t.includes(d_to_k))
self.assertFalse(d_to_k.includes(e_to_l))
self.assertFalse(k_to_t.includes(e_to_l))
self.assertFalse(t_to_.includes(p_to_))
self.assertFalse(_to_h.includes(entire))
self.assertFalse(p_to_.includes(entire))
self.assertFalse(d_to_t.includes(entire))
def test_expand(self):
bounds = (('', 'd'), ('d', 'k'), ('k', 't'), ('t', ''))
donors = [
utils.Namespace('a/c-%d' % i, b[0], b[1])
for i, b in enumerate(bounds)
]
acceptor = utils.Namespace('a/c-acc', 'f', 's')
self.assertTrue(acceptor.expand(donors[:1]))
self.assertEqual((utils.Namespace.MIN, 's'),
(acceptor.lower, acceptor.upper))
acceptor = utils.Namespace('a/c-acc', 'f', 's')
self.assertTrue(acceptor.expand(donors[:2]))
self.assertEqual((utils.Namespace.MIN, 's'),
(acceptor.lower, acceptor.upper))
acceptor = utils.Namespace('a/c-acc', 'f', 's')
self.assertTrue(acceptor.expand(donors[1:3]))
self.assertEqual(('d', 't'),
(acceptor.lower, acceptor.upper))
acceptor = utils.Namespace('a/c-acc', 'f', 's')
self.assertTrue(acceptor.expand(donors))
self.assertEqual((utils.Namespace.MIN, utils.Namespace.MAX),
(acceptor.lower, acceptor.upper))
acceptor = utils.Namespace('a/c-acc', 'f', 's')
self.assertTrue(acceptor.expand(donors[1:2] + donors[3:]))
self.assertEqual(('d', utils.Namespace.MAX),
(acceptor.lower, acceptor.upper))
acceptor = utils.Namespace('a/c-acc', '', 'd')
self.assertFalse(acceptor.expand(donors[:1]))
self.assertEqual((utils.Namespace.MIN, 'd'),
(acceptor.lower, acceptor.upper))
acceptor = utils.Namespace('a/c-acc', 'b', 'v')
self.assertFalse(acceptor.expand(donors[1:3]))
self.assertEqual(('b', 'v'),
(acceptor.lower, acceptor.upper))
def test_total_ordering(self):
a_start_ns = utils.Namespace('a/-a', '', 'a')
a_atob_ns = utils.Namespace('a/a-b', 'a', 'b')
a_atof_ns = utils.Namespace('a/a-f', 'a', 'f')
a_ftol_ns = utils.Namespace('a/f-l', 'f', 'l')
a_ltor_ns = utils.Namespace('a/l-r', 'l', 'r')
a_rtoz_ns = utils.Namespace('a/r-z', 'r', 'z')
a_end_ns = utils.Namespace('a/z-', 'z', '')
b_start_ns = utils.Namespace('b/-a', '', 'a')
self.assertEqual(a_start_ns, b_start_ns)
self.assertNotEqual(a_start_ns, a_atob_ns)
self.assertLess(a_start_ns, a_atob_ns)
self.assertLess(a_atof_ns, a_ftol_ns)
self.assertLess(a_ftol_ns, a_ltor_ns)
self.assertLess(a_ltor_ns, a_rtoz_ns)
self.assertLess(a_rtoz_ns, a_end_ns)
self.assertLessEqual(a_start_ns, a_atof_ns)
self.assertLessEqual(a_atof_ns, a_rtoz_ns)
self.assertLessEqual(a_atof_ns, a_atof_ns)
self.assertGreater(a_end_ns, a_atof_ns)
self.assertGreater(a_rtoz_ns, a_ftol_ns)
self.assertGreater(a_end_ns, a_start_ns)
self.assertGreaterEqual(a_atof_ns, a_atof_ns)
self.assertGreaterEqual(a_end_ns, a_atof_ns)
self.assertGreaterEqual(a_rtoz_ns, a_start_ns)
class TestNamespaceBoundList(unittest.TestCase):
def setUp(self):
start = ['', 'a/-a']
self.start_ns = utils.Namespace('a/-a', '', 'a')
atof = ['a', 'a/a-f']
self.atof_ns = utils.Namespace('a/a-f', 'a', 'f')
ftol = ['f', 'a/f-l']
self.ftol_ns = utils.Namespace('a/f-l', 'f', 'l')
ltor = ['l', 'a/l-r']
self.ltor_ns = utils.Namespace('a/l-r', 'l', 'r')
rtoz = ['r', 'a/r-z']
self.rtoz_ns = utils.Namespace('a/r-z', 'r', 'z')
end = ['z', 'a/z-']
self.end_ns = utils.Namespace('a/z-', 'z', '')
self.lowerbounds = [start, atof, ftol, ltor, rtoz, end]
def test_get_namespace(self):
namespace_list = utils.NamespaceBoundList(self.lowerbounds)
self.assertEqual(namespace_list.bounds, self.lowerbounds)
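# the lookups below imply a bisect on the lower bounds: a name maps to the
# entry with the greatest lower bound strictly below it, e.g. 'a' is still
# in the ''-'a' namespace but 'f\x00' falls into 'f'-'l'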
self.assertEqual(namespace_list.get_namespace('1'), self.start_ns)
self.assertEqual(namespace_list.get_namespace('a'), self.start_ns)
self.assertEqual(namespace_list.get_namespace('b'), self.atof_ns)
self.assertEqual(namespace_list.get_namespace('f'), self.atof_ns)
self.assertEqual(namespace_list.get_namespace('f\x00'), self.ftol_ns)
self.assertEqual(namespace_list.get_namespace('l'), self.ftol_ns)
self.assertEqual(namespace_list.get_namespace('x'), self.rtoz_ns)
self.assertEqual(namespace_list.get_namespace('r'), self.ltor_ns)
self.assertEqual(namespace_list.get_namespace('}'), self.end_ns)
def test_parse(self):
namespaces_list = utils.NamespaceBoundList.parse(None)
self.assertEqual(namespaces_list, None)
namespaces = [self.start_ns, self.atof_ns, self.ftol_ns,
self.ltor_ns, self.rtoz_ns, self.end_ns]
namespace_list = utils.NamespaceBoundList.parse(namespaces)
self.assertEqual(namespace_list.bounds, self.lowerbounds)
self.assertEqual(namespace_list.get_namespace('1'), self.start_ns)
self.assertEqual(namespace_list.get_namespace('l'), self.ftol_ns)
self.assertEqual(namespace_list.get_namespace('x'), self.rtoz_ns)
self.assertEqual(namespace_list.get_namespace('r'), self.ltor_ns)
self.assertEqual(namespace_list.get_namespace('}'), self.end_ns)
self.assertEqual(namespace_list.bounds, self.lowerbounds)
overlap_f_ns = utils.Namespace('a/-f', '', 'f')
overlapping_namespaces = [self.start_ns, self.atof_ns, overlap_f_ns,
self.ftol_ns, self.ltor_ns, self.rtoz_ns,
self.end_ns]
namespace_list = utils.NamespaceBoundList.parse(
overlapping_namespaces)
self.assertEqual(namespace_list.bounds, self.lowerbounds)
overlap_l_ns = utils.Namespace('a/a-l', 'a', 'l')
overlapping_namespaces = [self.start_ns, self.atof_ns, self.ftol_ns,
overlap_l_ns, self.ltor_ns, self.rtoz_ns,
self.end_ns]
namespace_list = utils.NamespaceBoundList.parse(
overlapping_namespaces)
self.assertEqual(namespace_list.bounds, self.lowerbounds)
class TestShardRange(unittest.TestCase):
def setUp(self):
self.ts_iter = make_timestamp_iter()
def test_constants(self):
self.assertEqual({utils.ShardRange.SHARDING,
utils.ShardRange.SHARDED,
utils.ShardRange.SHRINKING,
utils.ShardRange.SHRUNK},
set(utils.ShardRange.CLEAVING_STATES))
self.assertEqual({utils.ShardRange.SHARDING,
utils.ShardRange.SHARDED},
set(utils.ShardRange.SHARDING_STATES))
self.assertEqual({utils.ShardRange.SHRINKING,
utils.ShardRange.SHRUNK},
set(utils.ShardRange.SHRINKING_STATES))
def test_min_max_bounds(self):
with self.assertRaises(TypeError):
utils.NamespaceOuterBound()
# max
self.assertEqual(utils.ShardRange.MAX, utils.ShardRange.MAX)
self.assertFalse(utils.ShardRange.MAX > utils.ShardRange.MAX)
self.assertFalse(utils.ShardRange.MAX < utils.ShardRange.MAX)
for val in 'z', u'\u00e4':
self.assertFalse(utils.ShardRange.MAX == val)
self.assertFalse(val > utils.ShardRange.MAX)
self.assertTrue(val < utils.ShardRange.MAX)
self.assertTrue(utils.ShardRange.MAX > val)
self.assertFalse(utils.ShardRange.MAX < val)
self.assertEqual('', str(utils.ShardRange.MAX))
self.assertFalse(utils.ShardRange.MAX)
self.assertTrue(utils.ShardRange.MAX == utils.ShardRange.MAX)
self.assertFalse(utils.ShardRange.MAX != utils.ShardRange.MAX)
self.assertTrue(
utils.ShardRange.MaxBound() == utils.ShardRange.MaxBound())
self.assertTrue(
utils.ShardRange.MaxBound() is utils.ShardRange.MaxBound())
self.assertTrue(
utils.ShardRange.MaxBound() is utils.ShardRange.MAX)
self.assertFalse(
utils.ShardRange.MaxBound() != utils.ShardRange.MaxBound())
# min
self.assertEqual(utils.ShardRange.MIN, utils.ShardRange.MIN)
self.assertFalse(utils.ShardRange.MIN > utils.ShardRange.MIN)
self.assertFalse(utils.ShardRange.MIN < utils.ShardRange.MIN)
for val in 'z', u'\u00e4':
self.assertFalse(utils.ShardRange.MIN == val)
self.assertFalse(val < utils.ShardRange.MIN)
self.assertTrue(val > utils.ShardRange.MIN)
self.assertTrue(utils.ShardRange.MIN < val)
self.assertFalse(utils.ShardRange.MIN > val)
self.assertFalse(utils.ShardRange.MIN)
self.assertEqual('', str(utils.ShardRange.MIN))
self.assertFalse(utils.ShardRange.MIN)
self.assertTrue(utils.ShardRange.MIN == utils.ShardRange.MIN)
self.assertFalse(utils.ShardRange.MIN != utils.ShardRange.MIN)
self.assertTrue(
utils.ShardRange.MinBound() == utils.ShardRange.MinBound())
self.assertTrue(
utils.ShardRange.MinBound() is utils.ShardRange.MinBound())
self.assertTrue(
utils.ShardRange.MinBound() is utils.ShardRange.MIN)
self.assertFalse(
utils.ShardRange.MinBound() != utils.ShardRange.MinBound())
self.assertFalse(utils.ShardRange.MAX == utils.ShardRange.MIN)
self.assertFalse(utils.ShardRange.MIN == utils.ShardRange.MAX)
self.assertTrue(utils.ShardRange.MAX != utils.ShardRange.MIN)
self.assertTrue(utils.ShardRange.MIN != utils.ShardRange.MAX)
self.assertFalse(utils.ShardRange.MAX is utils.ShardRange.MIN)
self.assertEqual(utils.ShardRange.MAX,
max(utils.ShardRange.MIN, utils.ShardRange.MAX))
self.assertEqual(utils.ShardRange.MIN,
min(utils.ShardRange.MIN, utils.ShardRange.MAX))
# check the outer bounds are hashable
hashmap = {utils.ShardRange.MIN: 'min',
utils.ShardRange.MAX: 'max'}
self.assertEqual(hashmap[utils.ShardRange.MIN], 'min')
self.assertEqual(hashmap[utils.ShardRange.MinBound()], 'min')
self.assertEqual(hashmap[utils.ShardRange.MAX], 'max')
self.assertEqual(hashmap[utils.ShardRange.MaxBound()], 'max')
def test_shard_range_initialisation(self):
def assert_initialisation_ok(params, expected):
pr = utils.ShardRange(**params)
self.assertDictEqual(dict(pr), expected)
def assert_initialisation_fails(params, err_type=ValueError):
with self.assertRaises(err_type):
utils.ShardRange(**params)
ts_1 = next(self.ts_iter)
ts_2 = next(self.ts_iter)
ts_3 = next(self.ts_iter)
ts_4 = next(self.ts_iter)
empty_run = dict(name=None, timestamp=None, lower=None,
upper=None, object_count=0, bytes_used=0,
meta_timestamp=None, deleted=0,
state=utils.ShardRange.FOUND, state_timestamp=None,
epoch=None)
# name, timestamp must be given
assert_initialisation_fails(empty_run.copy())
assert_initialisation_fails(dict(empty_run, name='a/c'), TypeError)
assert_initialisation_fails(dict(empty_run, timestamp=ts_1))
# name must be form a/c
assert_initialisation_fails(dict(empty_run, name='c', timestamp=ts_1))
assert_initialisation_fails(dict(empty_run, name='', timestamp=ts_1))
assert_initialisation_fails(dict(empty_run, name='/a/c',
timestamp=ts_1))
assert_initialisation_fails(dict(empty_run, name='/c',
timestamp=ts_1))
# lower, upper can be None
expect = dict(name='a/c', timestamp=ts_1.internal, lower='',
upper='', object_count=0, bytes_used=0,
meta_timestamp=ts_1.internal, deleted=0,
state=utils.ShardRange.FOUND,
state_timestamp=ts_1.internal, epoch=None,
reported=0, tombstones=-1)
assert_initialisation_ok(dict(empty_run, name='a/c', timestamp=ts_1),
expect)
assert_initialisation_ok(dict(name='a/c', timestamp=ts_1), expect)
good_run = dict(name='a/c', timestamp=ts_1, lower='l',
upper='u', object_count=2, bytes_used=10,
meta_timestamp=ts_2, deleted=0,
state=utils.ShardRange.CREATED,
state_timestamp=ts_3.internal, epoch=ts_4,
reported=0, tombstones=11)
expect.update({'lower': 'l', 'upper': 'u', 'object_count': 2,
'bytes_used': 10, 'meta_timestamp': ts_2.internal,
'state': utils.ShardRange.CREATED,
'state_timestamp': ts_3.internal, 'epoch': ts_4,
'reported': 0, 'tombstones': 11})
assert_initialisation_ok(good_run.copy(), expect)
# obj count, tombstones and bytes used as int strings
good_str_run = good_run.copy()
good_str_run.update({'object_count': '2', 'bytes_used': '10',
'tombstones': '11'})
assert_initialisation_ok(good_str_run, expect)
good_no_meta = good_run.copy()
good_no_meta.pop('meta_timestamp')
assert_initialisation_ok(good_no_meta,
dict(expect, meta_timestamp=ts_1.internal))
good_deleted = good_run.copy()
good_deleted['deleted'] = 1
assert_initialisation_ok(good_deleted,
dict(expect, deleted=1))
good_reported = good_run.copy()
good_reported['reported'] = 1
assert_initialisation_ok(good_reported,
dict(expect, reported=1))
assert_initialisation_fails(dict(good_run, timestamp='water balloon'))
assert_initialisation_fails(
dict(good_run, meta_timestamp='water balloon'))
assert_initialisation_fails(dict(good_run, lower='water balloon'))
assert_initialisation_fails(dict(good_run, upper='balloon'))
assert_initialisation_fails(
dict(good_run, object_count='water balloon'))
assert_initialisation_fails(dict(good_run, bytes_used='water balloon'))
assert_initialisation_fails(dict(good_run, object_count=-1))
assert_initialisation_fails(dict(good_run, bytes_used=-1))
assert_initialisation_fails(dict(good_run, state=-1))
assert_initialisation_fails(dict(good_run, state_timestamp='not a ts'))
assert_initialisation_fails(dict(good_run, name='/a/c'))
assert_initialisation_fails(dict(good_run, name='/a/c/'))
assert_initialisation_fails(dict(good_run, name='a/c/'))
assert_initialisation_fails(dict(good_run, name='a'))
assert_initialisation_fails(dict(good_run, name=''))
def _check_to_from_dict(self, lower, upper):
ts_1 = next(self.ts_iter)
ts_2 = next(self.ts_iter)
ts_3 = next(self.ts_iter)
ts_4 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, lower, upper, 10, 100, ts_2,
state=None, state_timestamp=ts_3, epoch=ts_4)
sr_dict = dict(sr)
expected = {
'name': 'a/test', 'timestamp': ts_1.internal, 'lower': lower,
'upper': upper, 'object_count': 10, 'bytes_used': 100,
'meta_timestamp': ts_2.internal, 'deleted': 0,
'state': utils.ShardRange.FOUND, 'state_timestamp': ts_3.internal,
'epoch': ts_4, 'reported': 0, 'tombstones': -1}
self.assertEqual(expected, sr_dict)
self.assertIsInstance(sr_dict['lower'], six.string_types)
self.assertIsInstance(sr_dict['upper'], six.string_types)
sr_new = utils.ShardRange.from_dict(sr_dict)
self.assertEqual(sr, sr_new)
self.assertEqual(sr_dict, dict(sr_new))
sr_new = utils.ShardRange(**sr_dict)
self.assertEqual(sr, sr_new)
self.assertEqual(sr_dict, dict(sr_new))
for key in sr_dict:
bad_dict = dict(sr_dict)
bad_dict.pop(key)
if key in ('reported', 'tombstones'):
# These were added after the fact, and we need to be able to
# eat data from old servers
utils.ShardRange.from_dict(bad_dict)
utils.ShardRange(**bad_dict)
continue
# The rest were present from the beginning
with self.assertRaises(KeyError):
utils.ShardRange.from_dict(bad_dict)
# But __init__ still (generally) works!
if key != 'name':
utils.ShardRange(**bad_dict)
else:
with self.assertRaises(TypeError):
utils.ShardRange(**bad_dict)
def test_to_from_dict(self):
self._check_to_from_dict('l', 'u')
self._check_to_from_dict('', '')
def test_timestamp_setter(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
self.assertEqual(ts_1, sr.timestamp)
ts_2 = next(self.ts_iter)
sr.timestamp = ts_2
self.assertEqual(ts_2, sr.timestamp)
sr.timestamp = 0
self.assertEqual(utils.Timestamp(0), sr.timestamp)
with self.assertRaises(TypeError):
sr.timestamp = None
def test_meta_timestamp_setter(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
self.assertEqual(ts_1, sr.timestamp)
self.assertEqual(ts_1, sr.meta_timestamp)
ts_2 = next(self.ts_iter)
sr.meta_timestamp = ts_2
self.assertEqual(ts_1, sr.timestamp)
self.assertEqual(ts_2, sr.meta_timestamp)
ts_3 = next(self.ts_iter)
sr.timestamp = ts_3
self.assertEqual(ts_3, sr.timestamp)
self.assertEqual(ts_2, sr.meta_timestamp)
# meta_timestamp defaults to tracking timestamp
sr.meta_timestamp = None
self.assertEqual(ts_3, sr.timestamp)
self.assertEqual(ts_3, sr.meta_timestamp)
ts_4 = next(self.ts_iter)
sr.timestamp = ts_4
self.assertEqual(ts_4, sr.timestamp)
self.assertEqual(ts_4, sr.meta_timestamp)
sr.meta_timestamp = 0
self.assertEqual(ts_4, sr.timestamp)
self.assertEqual(utils.Timestamp(0), sr.meta_timestamp)
def test_update_meta(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
with mock_timestamp_now(next(self.ts_iter)) as now:
sr.update_meta(9, 99)
self.assertEqual(9, sr.object_count)
self.assertEqual(99, sr.bytes_used)
self.assertEqual(now, sr.meta_timestamp)
with mock_timestamp_now(next(self.ts_iter)) as now:
sr.update_meta(99, 999, None)
self.assertEqual(99, sr.object_count)
self.assertEqual(999, sr.bytes_used)
self.assertEqual(now, sr.meta_timestamp)
ts_2 = next(self.ts_iter)
sr.update_meta(21, 2112, ts_2)
self.assertEqual(21, sr.object_count)
self.assertEqual(2112, sr.bytes_used)
self.assertEqual(ts_2, sr.meta_timestamp)
sr.update_meta('11', '12')
self.assertEqual(11, sr.object_count)
self.assertEqual(12, sr.bytes_used)
def check_bad_args(*args):
with self.assertRaises(ValueError):
sr.update_meta(*args)
check_bad_args('bad', 10)
check_bad_args(10, 'bad')
check_bad_args(10, 11, 'bad')
def test_increment_meta(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 1, 2, None)
with mock_timestamp_now(next(self.ts_iter)) as now:
sr.increment_meta(9, 99)
self.assertEqual(10, sr.object_count)
self.assertEqual(101, sr.bytes_used)
self.assertEqual(now, sr.meta_timestamp)
sr.increment_meta('11', '12')
self.assertEqual(21, sr.object_count)
self.assertEqual(113, sr.bytes_used)
def check_bad_args(*args):
with self.assertRaises(ValueError):
sr.increment_meta(*args)
check_bad_args('bad', 10)
check_bad_args(10, 'bad')
def test_update_tombstones(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
self.assertEqual(-1, sr.tombstones)
self.assertFalse(sr.reported)
with mock_timestamp_now(next(self.ts_iter)) as now:
sr.update_tombstones(1)
self.assertEqual(1, sr.tombstones)
self.assertEqual(now, sr.meta_timestamp)
self.assertFalse(sr.reported)
sr.reported = True
with mock_timestamp_now(next(self.ts_iter)) as now:
sr.update_tombstones(3, None)
self.assertEqual(3, sr.tombstones)
self.assertEqual(now, sr.meta_timestamp)
self.assertFalse(sr.reported)
sr.reported = True
ts_2 = next(self.ts_iter)
sr.update_tombstones(5, ts_2)
self.assertEqual(5, sr.tombstones)
self.assertEqual(ts_2, sr.meta_timestamp)
self.assertFalse(sr.reported)
# no change in value -> no change in reported
sr.reported = True
ts_3 = next(self.ts_iter)
sr.update_tombstones(5, ts_3)
self.assertEqual(5, sr.tombstones)
self.assertEqual(ts_3, sr.meta_timestamp)
self.assertTrue(sr.reported)
sr.update_meta('11', '12')
self.assertEqual(11, sr.object_count)
self.assertEqual(12, sr.bytes_used)
def check_bad_args(*args):
with self.assertRaises(ValueError):
sr.update_tombstones(*args)
check_bad_args('bad')
check_bad_args(10, 'bad')
def test_row_count(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
self.assertEqual(0, sr.row_count)
sr.update_meta(11, 123)
self.assertEqual(11, sr.row_count)
sr.update_tombstones(13)
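        # row_count = object_count + tombstones = 11 + 13 = 24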
self.assertEqual(24, sr.row_count)
sr.update_meta(0, 0)
self.assertEqual(13, sr.row_count)
def test_state_timestamp_setter(self):
ts_1 = next(self.ts_iter)
sr = utils.ShardRange('a/test', ts_1, 'l', 'u', 0, 0, None)
self.assertEqual(ts_1, sr.timestamp)
self.assertEqual(ts_1, sr.state_timestamp)
ts_2 = next(self.ts_iter)
sr.state_timestamp = ts_2
self.assertEqual(ts_1, sr.timestamp)
self.assertEqual(ts_2, sr.state_timestamp)
ts_3 = next(self.ts_iter)
sr.timestamp = ts_3
self.assertEqual(ts_3, sr.timestamp)
self.assertEqual(ts_2, sr.state_timestamp)
# state_timestamp defaults to tracking timestamp
sr.state_timestamp = None
self.assertEqual(ts_3, sr.timestamp)
self.assertEqual(ts_3, sr.state_timestamp)
ts_4 = next(self.ts_iter)
sr.timestamp = ts_4
self.assertEqual(ts_4, sr.timestamp)
self.assertEqual(ts_4, sr.state_timestamp)
sr.state_timestamp = 0
self.assertEqual(ts_4, sr.timestamp)
self.assertEqual(utils.Timestamp(0), sr.state_timestamp)
def test_state_setter(self):
for state, state_name in utils.ShardRange.STATES.items():
for test_value in (
state, str(state), state_name, state_name.upper()):
sr = utils.ShardRange('a/test', next(self.ts_iter), 'l', 'u')
sr.state = test_value
actual = sr.state
self.assertEqual(
state, actual,
'Expected %s but got %s for %s' %
(state, actual, test_value)
)
for bad_state in (max(utils.ShardRange.STATES) + 1,
-1, 99, None, 'stringy', 1.1):
sr = utils.ShardRange('a/test', next(self.ts_iter), 'l', 'u')
with self.assertRaises(ValueError) as cm:
sr.state = bad_state
self.assertIn('Invalid state', str(cm.exception))
def test_update_state(self):
sr = utils.ShardRange('a/c', next(self.ts_iter))
old_sr = sr.copy()
self.assertEqual(utils.ShardRange.FOUND, sr.state)
self.assertEqual(dict(sr), dict(old_sr)) # sanity check
for state in utils.ShardRange.STATES:
if state == utils.ShardRange.FOUND:
continue
self.assertTrue(sr.update_state(state))
self.assertEqual(dict(old_sr, state=state), dict(sr))
self.assertFalse(sr.update_state(state))
self.assertEqual(dict(old_sr, state=state), dict(sr))
sr = utils.ShardRange('a/c', next(self.ts_iter))
old_sr = sr.copy()
for state in utils.ShardRange.STATES:
ts = next(self.ts_iter)
self.assertTrue(sr.update_state(state, state_timestamp=ts))
self.assertEqual(dict(old_sr, state=state, state_timestamp=ts),
dict(sr))
def test_resolve_state(self):
for name, number in utils.ShardRange.STATES_BY_NAME.items():
self.assertEqual(
(number, name), utils.ShardRange.resolve_state(name))
self.assertEqual(
(number, name), utils.ShardRange.resolve_state(name.upper()))
self.assertEqual(
(number, name), utils.ShardRange.resolve_state(name.title()))
self.assertEqual(
(number, name), utils.ShardRange.resolve_state(number))
self.assertEqual(
(number, name), utils.ShardRange.resolve_state(str(number)))
def check_bad_value(value):
with self.assertRaises(ValueError) as cm:
utils.ShardRange.resolve_state(value)
self.assertIn('Invalid state %r' % value, str(cm.exception))
check_bad_value(min(utils.ShardRange.STATES) - 1)
check_bad_value(max(utils.ShardRange.STATES) + 1)
check_bad_value('badstate')
def test_epoch_setter(self):
sr = utils.ShardRange('a/c', next(self.ts_iter))
self.assertIsNone(sr.epoch)
ts = next(self.ts_iter)
sr.epoch = ts
self.assertEqual(ts, sr.epoch)
ts = next(self.ts_iter)
sr.epoch = ts.internal
self.assertEqual(ts, sr.epoch)
sr.epoch = None
self.assertIsNone(sr.epoch)
with self.assertRaises(ValueError):
sr.epoch = 'bad'
def test_deleted_setter(self):
sr = utils.ShardRange('a/c', next(self.ts_iter))
for val in (True, 1):
sr.deleted = val
self.assertIs(True, sr.deleted)
for val in (False, 0, None):
sr.deleted = val
self.assertIs(False, sr.deleted)
def test_set_deleted(self):
sr = utils.ShardRange('a/c', next(self.ts_iter))
# initialise other timestamps
sr.update_state(utils.ShardRange.ACTIVE,
state_timestamp=utils.Timestamp.now())
sr.update_meta(1, 2)
old_sr = sr.copy()
self.assertIs(False, sr.deleted) # sanity check
self.assertEqual(dict(sr), dict(old_sr)) # sanity check
with mock_timestamp_now(next(self.ts_iter)) as now:
self.assertTrue(sr.set_deleted())
self.assertEqual(now, sr.timestamp)
self.assertIs(True, sr.deleted)
old_sr_dict = dict(old_sr)
old_sr_dict.pop('deleted')
old_sr_dict.pop('timestamp')
sr_dict = dict(sr)
sr_dict.pop('deleted')
sr_dict.pop('timestamp')
self.assertEqual(old_sr_dict, sr_dict)
# no change
self.assertFalse(sr.set_deleted())
self.assertEqual(now, sr.timestamp)
self.assertIs(True, sr.deleted)
# force timestamp change
with mock_timestamp_now(next(self.ts_iter)) as now:
self.assertTrue(sr.set_deleted(timestamp=now))
self.assertEqual(now, sr.timestamp)
self.assertIs(True, sr.deleted)
def test_repr(self):
ts = next(self.ts_iter)
ts.offset = 1234
meta_ts = next(self.ts_iter)
state_ts = next(self.ts_iter)
sr = utils.ShardRange('a/c', ts, 'l', 'u', 100, 1000,
meta_timestamp=meta_ts,
state=utils.ShardRange.ACTIVE,
state_timestamp=state_ts)
self.assertEqual(
"ShardRange<%r to %r as of %s, (100, 1000) as of %s, "
"active as of %s>"
% ('l', 'u',
ts.internal, meta_ts.internal, state_ts.internal), str(sr))
ts.offset = 0
meta_ts.offset = 2
state_ts.offset = 3
sr = utils.ShardRange('a/c', ts, '', '', 100, 1000,
meta_timestamp=meta_ts,
state=utils.ShardRange.FOUND,
state_timestamp=state_ts)
self.assertEqual(
"ShardRange<MinBound to MaxBound as of %s, (100, 1000) as of %s, "
"found as of %s>"
% (ts.internal, meta_ts.internal, state_ts.internal), str(sr))
def test_copy(self):
sr = utils.ShardRange('a/c', next(self.ts_iter), 'x', 'y', 99, 99000,
meta_timestamp=next(self.ts_iter),
state=utils.ShardRange.CREATED,
state_timestamp=next(self.ts_iter))
new = sr.copy()
self.assertEqual(dict(sr), dict(new))
new = sr.copy(deleted=1)
self.assertEqual(dict(sr, deleted=1), dict(new))
new_timestamp = next(self.ts_iter)
new = sr.copy(timestamp=new_timestamp)
self.assertEqual(dict(sr, timestamp=new_timestamp.internal,
meta_timestamp=new_timestamp.internal,
state_timestamp=new_timestamp.internal),
dict(new))
new = sr.copy(timestamp=new_timestamp, object_count=99)
self.assertEqual(dict(sr, timestamp=new_timestamp.internal,
meta_timestamp=new_timestamp.internal,
state_timestamp=new_timestamp.internal,
object_count=99),
dict(new))
def test_make_path(self):
ts = utils.Timestamp.now()
actual = utils.ShardRange.make_path('a', 'root', 'parent', ts, 0)
parent_hash = md5(b'parent', usedforsecurity=False).hexdigest()
self.assertEqual('a/root-%s-%s-0' % (parent_hash, ts.internal), actual)
actual = utils.ShardRange.make_path('a', 'root', 'parent', ts, 3)
self.assertEqual('a/root-%s-%s-3' % (parent_hash, ts.internal), actual)
actual = utils.ShardRange.make_path('a', 'root', 'parent', ts, '3')
self.assertEqual('a/root-%s-%s-3' % (parent_hash, ts.internal), actual)
actual = utils.ShardRange.make_path(
'a', 'root', 'parent', ts.internal, '3')
self.assertEqual('a/root-%s-%s-3' % (parent_hash, ts.internal), actual)
def test_is_child_of(self):
# Set up some shard ranges in relational hierarchy:
# account -> root -> grandparent -> parent -> child
# using abbreviated names a_r_gp_p_c
# account 1
ts = next(self.ts_iter)
a1_r1 = utils.ShardRange('a1/r1', ts)
ts = next(self.ts_iter)
a1_r1_gp1 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', 'r1', ts, 1), ts)
ts = next(self.ts_iter)
a1_r1_gp1_p1 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', a1_r1_gp1.container, ts, 1), ts)
ts = next(self.ts_iter)
a1_r1_gp1_p1_c1 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', a1_r1_gp1_p1.container, ts, 1), ts)
ts = next(self.ts_iter)
a1_r1_gp1_p1_c2 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', a1_r1_gp1_p1.container, ts, 2), ts)
ts = next(self.ts_iter)
a1_r1_gp1_p2 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', a1_r1_gp1.container, ts, 2), ts)
ts = next(self.ts_iter)
a1_r1_gp2 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', 'r1', ts, 2), ts) # different index
ts = next(self.ts_iter)
a1_r1_gp2_p1 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', a1_r1_gp2.container, ts, 1), ts)
# drop the index from grandparent name
ts = next(self.ts_iter)
rogue_a1_r1_gp = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', 'r1', ts, 1)[:-2], ts)
# account 1, root 2
ts = next(self.ts_iter)
a1_r2 = utils.ShardRange('a1/r2', ts)
ts = next(self.ts_iter)
a1_r2_gp1 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r2', a1_r2.container, ts, 1), ts)
ts = next(self.ts_iter)
a1_r2_gp1_p1 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r2', a1_r2_gp1.container, ts, 3), ts)
# account 2, root1
a2_r1 = utils.ShardRange('a2/r1', ts)
ts = next(self.ts_iter)
a2_r1_gp1 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a2', 'r1', a2_r1.container, ts, 1), ts)
ts = next(self.ts_iter)
a2_r1_gp1_p1 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a2', 'r1', a2_r1_gp1.container, ts, 3), ts)
# verify parent-child within same account.
self.assertTrue(a1_r1_gp1.is_child_of(a1_r1))
self.assertTrue(a1_r1_gp1_p1.is_child_of(a1_r1_gp1))
self.assertTrue(a1_r1_gp1_p1_c1.is_child_of(a1_r1_gp1_p1))
self.assertTrue(a1_r1_gp1_p1_c2.is_child_of(a1_r1_gp1_p1))
self.assertTrue(a1_r1_gp1_p2.is_child_of(a1_r1_gp1))
self.assertTrue(a1_r1_gp2.is_child_of(a1_r1))
self.assertTrue(a1_r1_gp2_p1.is_child_of(a1_r1_gp2))
self.assertTrue(a1_r2_gp1.is_child_of(a1_r2))
self.assertTrue(a1_r2_gp1_p1.is_child_of(a1_r2_gp1))
self.assertTrue(a2_r1_gp1.is_child_of(a2_r1))
self.assertTrue(a2_r1_gp1_p1.is_child_of(a2_r1_gp1))
# verify not parent-child within same account.
self.assertFalse(a1_r1.is_child_of(a1_r1))
self.assertFalse(a1_r1.is_child_of(a1_r2))
self.assertFalse(a1_r1_gp1.is_child_of(a1_r2))
self.assertFalse(a1_r1_gp1.is_child_of(a1_r1_gp1))
self.assertFalse(a1_r1_gp1.is_child_of(a1_r1_gp1_p1))
self.assertFalse(a1_r1_gp1.is_child_of(a1_r1_gp1_p1_c1))
self.assertFalse(a1_r1_gp1_p1.is_child_of(a1_r1))
self.assertFalse(a1_r1_gp1_p1.is_child_of(a1_r2))
self.assertFalse(a1_r1_gp1_p1.is_child_of(a1_r1_gp2))
self.assertFalse(a1_r1_gp1_p1.is_child_of(a1_r2_gp1))
self.assertFalse(a1_r1_gp1_p1.is_child_of(rogue_a1_r1_gp))
self.assertFalse(a1_r1_gp1_p1.is_child_of(a1_r1_gp1_p1))
self.assertFalse(a1_r1_gp1_p1.is_child_of(a1_r1_gp1_p2))
self.assertFalse(a1_r1_gp1_p1.is_child_of(a1_r2_gp1_p1))
self.assertFalse(a1_r1_gp1_p1.is_child_of(a1_r1_gp1_p1_c1))
self.assertFalse(a1_r1_gp1_p1.is_child_of(a1_r1_gp1_p1_c2))
self.assertFalse(a1_r1_gp1_p1_c1.is_child_of(a1_r1))
self.assertFalse(a1_r1_gp1_p1_c1.is_child_of(a1_r1_gp1))
self.assertFalse(a1_r1_gp1_p1_c1.is_child_of(a1_r1_gp1_p2))
self.assertFalse(a1_r1_gp1_p1_c1.is_child_of(a1_r1_gp2_p1))
self.assertFalse(a1_r1_gp1_p1_c1.is_child_of(a1_r1_gp1_p1_c1))
self.assertFalse(a1_r1_gp1_p1_c1.is_child_of(a1_r1_gp1_p1_c2))
self.assertFalse(a1_r1_gp1_p1_c1.is_child_of(a1_r2_gp1_p1))
self.assertFalse(a1_r1_gp1_p1_c1.is_child_of(a2_r1_gp1_p1))
self.assertFalse(a1_r2_gp1.is_child_of(a1_r1))
self.assertFalse(a1_r2_gp1_p1.is_child_of(a1_r1_gp1))
# across different accounts, 'is_child_of' works in some cases but not
# all, so don't use it for shard ranges in different accounts.
self.assertFalse(a1_r1.is_child_of(a2_r1))
self.assertFalse(a2_r1_gp1_p1.is_child_of(a1_r1_gp1))
self.assertFalse(a1_r1_gp1_p1.is_child_of(a2_r1))
self.assertTrue(a1_r1_gp1.is_child_of(a2_r1))
self.assertTrue(a2_r1_gp1.is_child_of(a1_r1))
def test_find_root(self):
# account 1
ts = next(self.ts_iter)
a1_r1 = utils.ShardRange('a1/r1', ts)
ts = next(self.ts_iter)
a1_r1_gp1 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', 'r1', ts, 1), ts, '', 'l')
ts = next(self.ts_iter)
a1_r1_gp1_p1 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', a1_r1_gp1.container, ts, 1), ts, 'a', 'k')
ts = next(self.ts_iter)
a1_r1_gp1_p1_c1 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', a1_r1_gp1_p1.container, ts, 1), ts, 'a', 'j')
ts = next(self.ts_iter)
a1_r1_gp1_p2 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', a1_r1_gp1.container, ts, 2), ts, 'k', 'l')
ts = next(self.ts_iter)
a1_r1_gp2 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', 'r1', ts, 2), ts, 'l', '') # different index
# full ancestry plus some others
all_shard_ranges = [a1_r1, a1_r1_gp1, a1_r1_gp1_p1, a1_r1_gp1_p1_c1,
a1_r1_gp1_p2, a1_r1_gp2]
random.shuffle(all_shard_ranges)
self.assertIsNone(a1_r1.find_root(all_shard_ranges))
self.assertEqual(a1_r1, a1_r1_gp1.find_root(all_shard_ranges))
self.assertEqual(a1_r1, a1_r1_gp1_p1.find_root(all_shard_ranges))
self.assertEqual(a1_r1, a1_r1_gp1_p1_c1.find_root(all_shard_ranges))
# missing a1_r1_gp1_p1
all_shard_ranges = [a1_r1, a1_r1_gp1, a1_r1_gp1_p1_c1,
a1_r1_gp1_p2, a1_r1_gp2]
random.shuffle(all_shard_ranges)
self.assertIsNone(a1_r1.find_root(all_shard_ranges))
self.assertEqual(a1_r1, a1_r1_gp1.find_root(all_shard_ranges))
self.assertEqual(a1_r1, a1_r1_gp1_p1.find_root(all_shard_ranges))
self.assertEqual(a1_r1, a1_r1_gp1_p1_c1.find_root(all_shard_ranges))
# empty list
self.assertIsNone(a1_r1_gp1_p1_c1.find_root([]))
# double entry
all_shard_ranges = [a1_r1, a1_r1, a1_r1_gp1, a1_r1_gp1]
random.shuffle(all_shard_ranges)
self.assertEqual(a1_r1, a1_r1_gp1_p1.find_root(all_shard_ranges))
self.assertEqual(a1_r1, a1_r1_gp1_p1_c1.find_root(all_shard_ranges))
def test_find_ancestors(self):
# account 1
ts = next(self.ts_iter)
a1_r1 = utils.ShardRange('a1/r1', ts)
ts = next(self.ts_iter)
a1_r1_gp1 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', 'r1', ts, 1), ts, '', 'l')
ts = next(self.ts_iter)
a1_r1_gp1_p1 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', a1_r1_gp1.container, ts, 1), ts, 'a', 'k')
ts = next(self.ts_iter)
a1_r1_gp1_p1_c1 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', a1_r1_gp1_p1.container, ts, 1), ts, 'a', 'j')
ts = next(self.ts_iter)
a1_r1_gp1_p2 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', a1_r1_gp1.container, ts, 2), ts, 'k', 'l')
ts = next(self.ts_iter)
a1_r1_gp2 = utils.ShardRange(utils.ShardRange.make_path(
'.shards_a1', 'r1', 'r1', ts, 2), ts, 'l', '') # different index
# full ancestry plus some others
all_shard_ranges = [a1_r1, a1_r1_gp1, a1_r1_gp1_p1, a1_r1_gp1_p1_c1,
a1_r1_gp1_p2, a1_r1_gp2]
random.shuffle(all_shard_ranges)
self.assertEqual([], a1_r1.find_ancestors(all_shard_ranges))
self.assertEqual([a1_r1], a1_r1_gp1.find_ancestors(all_shard_ranges))
self.assertEqual([a1_r1_gp1, a1_r1],
a1_r1_gp1_p1.find_ancestors(all_shard_ranges))
self.assertEqual([a1_r1_gp1_p1, a1_r1_gp1, a1_r1],
a1_r1_gp1_p1_c1.find_ancestors(all_shard_ranges))
# missing a1_r1_gp1_p1
all_shard_ranges = [a1_r1, a1_r1_gp1, a1_r1_gp1_p1_c1,
a1_r1_gp1_p2, a1_r1_gp2]
random.shuffle(all_shard_ranges)
self.assertEqual([], a1_r1.find_ancestors(all_shard_ranges))
self.assertEqual([a1_r1], a1_r1_gp1.find_ancestors(all_shard_ranges))
self.assertEqual([a1_r1_gp1, a1_r1],
a1_r1_gp1_p1.find_ancestors(all_shard_ranges))
self.assertEqual([a1_r1],
a1_r1_gp1_p1_c1.find_ancestors(all_shard_ranges))
# missing a1_r1_gp1
all_shard_ranges = [a1_r1, a1_r1_gp1_p1, a1_r1_gp1_p1_c1,
a1_r1_gp1_p2, a1_r1_gp2]
random.shuffle(all_shard_ranges)
self.assertEqual([], a1_r1.find_ancestors(all_shard_ranges))
self.assertEqual([a1_r1], a1_r1_gp1.find_ancestors(all_shard_ranges))
self.assertEqual([a1_r1],
a1_r1_gp1_p1.find_ancestors(all_shard_ranges))
self.assertEqual([a1_r1_gp1_p1, a1_r1],
a1_r1_gp1_p1_c1.find_ancestors(all_shard_ranges))
# empty list
self.assertEqual([], a1_r1_gp1_p1_c1.find_ancestors([]))
# double entry
all_shard_ranges = [a1_r1, a1_r1, a1_r1_gp1, a1_r1_gp1]
random.shuffle(all_shard_ranges)
self.assertEqual([a1_r1_gp1, a1_r1],
a1_r1_gp1_p1.find_ancestors(all_shard_ranges))
self.assertEqual([a1_r1],
a1_r1_gp1_p1_c1.find_ancestors(all_shard_ranges))
all_shard_ranges = [a1_r1, a1_r1, a1_r1_gp1_p1, a1_r1_gp1_p1]
random.shuffle(all_shard_ranges)
self.assertEqual([a1_r1_gp1_p1, a1_r1],
a1_r1_gp1_p1_c1.find_ancestors(all_shard_ranges))
class TestShardRangeList(unittest.TestCase):
def setUp(self):
self.ts_iter = make_timestamp_iter()
self.t1 = next(self.ts_iter)
self.t2 = next(self.ts_iter)
self.ts_iter = make_timestamp_iter()
self.shard_ranges = [
utils.ShardRange('a/b', self.t1, 'a', 'b',
object_count=2, bytes_used=22, tombstones=222),
utils.ShardRange('b/c', self.t2, 'b', 'c',
object_count=4, bytes_used=44, tombstones=444),
utils.ShardRange('c/y', self.t1, 'c', 'y',
object_count=6, bytes_used=66),
]
def test_init(self):
srl = ShardRangeList()
self.assertEqual(0, len(srl))
self.assertEqual(utils.ShardRange.MIN, srl.lower)
self.assertEqual(utils.ShardRange.MIN, srl.upper)
self.assertEqual(0, srl.object_count)
self.assertEqual(0, srl.bytes_used)
self.assertEqual(0, srl.row_count)
def test_init_with_list(self):
srl = ShardRangeList(self.shard_ranges[:2])
self.assertEqual(2, len(srl))
self.assertEqual('a', srl.lower)
self.assertEqual('c', srl.upper)
self.assertEqual(6, srl.object_count)
self.assertEqual(66, srl.bytes_used)
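        # row_count includes tombstones: (2 + 222) + (4 + 444) = 672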
self.assertEqual(672, srl.row_count)
srl.append(self.shard_ranges[2])
self.assertEqual(3, len(srl))
self.assertEqual('a', srl.lower)
self.assertEqual('y', srl.upper)
self.assertEqual(12, srl.object_count)
self.assertEqual(132, srl.bytes_used)
self.assertEqual(-1, self.shard_ranges[2].tombstones) # sanity check
self.assertEqual(678, srl.row_count) # NB: tombstones=-1 not counted
def test_pop(self):
srl = ShardRangeList(self.shard_ranges[:2])
srl.pop()
self.assertEqual(1, len(srl))
self.assertEqual('a', srl.lower)
self.assertEqual('b', srl.upper)
self.assertEqual(2, srl.object_count)
self.assertEqual(22, srl.bytes_used)
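        # only 'a/b' remains: row_count = 2 + 222 = 224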
self.assertEqual(224, srl.row_count)
def test_slice(self):
srl = ShardRangeList(self.shard_ranges)
sublist = srl[:1]
self.assertIsInstance(sublist, ShardRangeList)
self.assertEqual(1, len(sublist))
self.assertEqual('a', sublist.lower)
self.assertEqual('b', sublist.upper)
self.assertEqual(2, sublist.object_count)
self.assertEqual(22, sublist.bytes_used)
self.assertEqual(224, sublist.row_count)
sublist = srl[1:]
self.assertIsInstance(sublist, ShardRangeList)
self.assertEqual(2, len(sublist))
self.assertEqual('b', sublist.lower)
self.assertEqual('y', sublist.upper)
self.assertEqual(10, sublist.object_count)
self.assertEqual(110, sublist.bytes_used)
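        # (4 + 444) + 6 = 454; 'c/y' tombstones=-1 are not counted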
self.assertEqual(454, sublist.row_count)
def test_includes(self):
srl = ShardRangeList(self.shard_ranges)
for sr in self.shard_ranges:
self.assertTrue(srl.includes(sr))
self.assertTrue(srl.includes(srl))
sr = utils.ShardRange('a/a', utils.Timestamp.now(), '', 'a')
self.assertFalse(srl.includes(sr))
sr = utils.ShardRange('a/a', utils.Timestamp.now(), '', 'b')
self.assertFalse(srl.includes(sr))
sr = utils.ShardRange('a/z', utils.Timestamp.now(), 'x', 'z')
self.assertFalse(srl.includes(sr))
sr = utils.ShardRange('a/z', utils.Timestamp.now(), 'y', 'z')
self.assertFalse(srl.includes(sr))
sr = utils.ShardRange('a/entire', utils.Timestamp.now(), '', '')
self.assertFalse(srl.includes(sr))
# entire range
srl_entire = ShardRangeList([sr])
self.assertFalse(srl.includes(srl_entire))
# make a fresh instance
sr = utils.ShardRange('a/entire', utils.Timestamp.now(), '', '')
self.assertTrue(srl_entire.includes(sr))
def test_timestamps(self):
srl = ShardRangeList(self.shard_ranges)
self.assertEqual({self.t1, self.t2}, srl.timestamps)
t3 = next(self.ts_iter)
self.shard_ranges[2].timestamp = t3
self.assertEqual({self.t1, self.t2, t3}, srl.timestamps)
srl.pop(0)
self.assertEqual({self.t2, t3}, srl.timestamps)
def test_states(self):
srl = ShardRangeList()
self.assertEqual(set(), srl.states)
srl = ShardRangeList(self.shard_ranges)
self.shard_ranges[0].update_state(
utils.ShardRange.CREATED, next(self.ts_iter))
self.shard_ranges[1].update_state(
utils.ShardRange.CLEAVED, next(self.ts_iter))
self.shard_ranges[2].update_state(
utils.ShardRange.ACTIVE, next(self.ts_iter))
self.assertEqual({utils.ShardRange.CREATED,
utils.ShardRange.CLEAVED,
utils.ShardRange.ACTIVE},
srl.states)
def test_filter(self):
srl = ShardRangeList(self.shard_ranges)
self.assertEqual(self.shard_ranges, srl.filter())
self.assertEqual(self.shard_ranges,
srl.filter(marker='', end_marker=''))
self.assertEqual(self.shard_ranges,
srl.filter(marker=utils.ShardRange.MIN,
end_marker=utils.ShardRange.MAX))
self.assertEqual([], srl.filter(marker=utils.ShardRange.MAX,
end_marker=utils.ShardRange.MIN))
self.assertEqual([], srl.filter(marker=utils.ShardRange.MIN,
end_marker=utils.ShardRange.MIN))
self.assertEqual([], srl.filter(marker=utils.ShardRange.MAX,
end_marker=utils.ShardRange.MAX))
self.assertEqual(self.shard_ranges[:1],
srl.filter(marker='', end_marker='b'))
self.assertEqual(self.shard_ranges[1:3],
srl.filter(marker='b', end_marker='y'))
self.assertEqual([],
srl.filter(marker='y', end_marker='y'))
self.assertEqual([],
srl.filter(marker='y', end_marker='x'))
# includes trumps marker & end_marker
self.assertEqual(self.shard_ranges[0:1],
srl.filter(includes='b', marker='c', end_marker='y'))
self.assertEqual(self.shard_ranges[0:1],
srl.filter(includes='b', marker='', end_marker=''))
self.assertEqual([], srl.filter(includes='z'))
def test_find_lower(self):
srl = ShardRangeList(self.shard_ranges)
self.shard_ranges[0].update_state(
utils.ShardRange.CREATED, next(self.ts_iter))
self.shard_ranges[1].update_state(
utils.ShardRange.CLEAVED, next(self.ts_iter))
self.shard_ranges[2].update_state(
utils.ShardRange.ACTIVE, next(self.ts_iter))
def do_test(states):
return srl.find_lower(lambda sr: sr.state in states)
self.assertEqual(srl.upper,
do_test([utils.ShardRange.FOUND]))
self.assertEqual(self.shard_ranges[0].lower,
do_test([utils.ShardRange.CREATED]))
self.assertEqual(self.shard_ranges[0].lower,
do_test((utils.ShardRange.CREATED,
utils.ShardRange.CLEAVED)))
self.assertEqual(self.shard_ranges[1].lower,
do_test((utils.ShardRange.ACTIVE,
utils.ShardRange.CLEAVED)))
self.assertEqual(self.shard_ranges[2].lower,
do_test([utils.ShardRange.ACTIVE]))
@patch('ctypes.get_errno')
@patch.object(utils, '_sys_posix_fallocate')
@patch.object(utils, '_sys_fallocate')
@patch.object(utils, 'FALLOCATE_RESERVE', 0)
class TestFallocate(unittest.TestCase):
def test_fallocate(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = 0
utils.fallocate(1234, 5000 * 2 ** 20)
# We can't use sys_fallocate_mock.assert_called_once_with because no
# two ctypes.c_uint64 objects are equal even if their values are
# equal. Yes, ctypes.c_uint64(123) != ctypes.c_uint64(123).
calls = sys_fallocate_mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1]
self.assertEqual(len(args), 4)
self.assertEqual(args[0], 1234)
self.assertEqual(args[1], utils.FALLOC_FL_KEEP_SIZE)
self.assertEqual(args[2].value, 0)
self.assertEqual(args[3].value, 5000 * 2 ** 20)
sys_posix_fallocate_mock.assert_not_called()
def test_fallocate_offset(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = 0
utils.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30)
calls = sys_fallocate_mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1]
self.assertEqual(len(args), 4)
self.assertEqual(args[0], 1234)
self.assertEqual(args[1], utils.FALLOC_FL_KEEP_SIZE)
self.assertEqual(args[2].value, 3 * 2 ** 30)
self.assertEqual(args[3].value, 5000 * 2 ** 20)
sys_posix_fallocate_mock.assert_not_called()
def test_fallocate_fatal_error(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = -1
get_errno_mock.return_value = errno.EIO
with self.assertRaises(OSError) as cm:
utils.fallocate(1234, 5000 * 2 ** 20)
self.assertEqual(cm.exception.errno, errno.EIO)
def test_fallocate_silent_errors(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = -1
for silent_error in (0, errno.ENOSYS, errno.EOPNOTSUPP, errno.EINVAL):
get_errno_mock.return_value = silent_error
try:
utils.fallocate(1234, 5678)
except OSError:
self.fail("fallocate() raised an error on %d", silent_error)
def test_posix_fallocate_fallback(self, sys_fallocate_mock,
sys_posix_fallocate_mock,
get_errno_mock):
sys_fallocate_mock.available = False
sys_fallocate_mock.side_effect = NotImplementedError
sys_posix_fallocate_mock.available = True
sys_posix_fallocate_mock.return_value = 0
utils.fallocate(1234, 567890)
sys_fallocate_mock.assert_not_called()
calls = sys_posix_fallocate_mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1]
self.assertEqual(len(args), 3)
self.assertEqual(args[0], 1234)
self.assertEqual(args[1].value, 0)
self.assertEqual(args[2].value, 567890)
def test_posix_fallocate_offset(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = False
sys_fallocate_mock.side_effect = NotImplementedError
sys_posix_fallocate_mock.available = True
sys_posix_fallocate_mock.return_value = 0
utils.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30)
calls = sys_posix_fallocate_mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1]
self.assertEqual(len(args), 3)
self.assertEqual(args[0], 1234)
self.assertEqual(args[1].value, 3 * 2 ** 30)
self.assertEqual(args[2].value, 5000 * 2 ** 20)
sys_fallocate_mock.assert_not_called()
def test_no_fallocates_available(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = False
sys_posix_fallocate_mock.available = False
with mock.patch("logging.warning") as warning_mock, \
mock.patch.object(utils, "_fallocate_warned_about_missing",
False):
utils.fallocate(321, 654)
utils.fallocate(321, 654)
sys_fallocate_mock.assert_not_called()
sys_posix_fallocate_mock.assert_not_called()
get_errno_mock.assert_not_called()
self.assertEqual(len(warning_mock.mock_calls), 1)
def test_arg_bounds(self, sys_fallocate_mock,
sys_posix_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = 0
with self.assertRaises(ValueError):
utils.fallocate(0, 1 << 64, 0)
with self.assertRaises(ValueError):
utils.fallocate(0, 0, -1)
with self.assertRaises(ValueError):
utils.fallocate(0, 0, 1 << 64)
self.assertEqual([], sys_fallocate_mock.mock_calls)
# sanity check
utils.fallocate(0, 0, 0)
self.assertEqual(
[mock.call(0, utils.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)],
sys_fallocate_mock.mock_calls)
# Go confirm the ctypes values separately; apparently == doesn't
# work the way you'd expect with ctypes :-/
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 0)
sys_fallocate_mock.reset_mock()
        # negative size will be adjusted to 0
utils.fallocate(0, -1, 0)
self.assertEqual(
[mock.call(0, utils.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)],
sys_fallocate_mock.mock_calls)
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 0)
@patch.object(os, 'fstatvfs')
@patch.object(utils, '_sys_fallocate', available=True, return_value=0)
@patch.object(utils, 'FALLOCATE_RESERVE', 0)
@patch.object(utils, 'FALLOCATE_IS_PERCENT', False)
@patch.object(utils, '_fallocate_enabled', True)
class TestFallocateReserve(unittest.TestCase):
def _statvfs_result(self, f_frsize, f_bavail):
        # Only f_frsize, f_blocks and f_bavail matter here; zero the rest
f_blocks = 100
return posix.statvfs_result((0, f_frsize, f_blocks, 0, f_bavail,
0, 0, 0, 0, 0))
def test_disabled(self, sys_fallocate_mock, fstatvfs_mock):
utils.disable_fallocate()
utils.fallocate(123, 456)
sys_fallocate_mock.assert_not_called()
fstatvfs_mock.assert_not_called()
def test_zero_reserve(self, sys_fallocate_mock, fstatvfs_mock):
utils.fallocate(123, 456)
fstatvfs_mock.assert_not_called()
self.assertEqual(len(sys_fallocate_mock.mock_calls), 1)
def test_enough_space(self, sys_fallocate_mock, fstatvfs_mock):
# Want 1024 bytes in reserve plus 1023 allocated, and have 2 blocks
# of size 1024 free, so succeed
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
utils.fallocate(88, 1023)
def test_not_enough_space(self, sys_fallocate_mock, fstatvfs_mock):
# Want 1024 bytes in reserve plus 1024 allocated, and have 2 blocks
# of size 1024 free, so fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 1024)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
% errno.ENOSPC)
sys_fallocate_mock.assert_not_called()
def test_not_enough_space_large(self, sys_fallocate_mock, fstatvfs_mock):
# Want 1024 bytes in reserve plus 1GB allocated, and have 2 blocks
# of size 1024 free, so fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 1 << 30)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail %g <= 1024'
% (errno.ENOSPC, ((2 * 1024) - (1 << 30))))
sys_fallocate_mock.assert_not_called()
def test_enough_space_small_blocks(self, sys_fallocate_mock,
fstatvfs_mock):
# Want 1024 bytes in reserve plus 1023 allocated, and have 4 blocks
# of size 512 free, so succeed
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
fstatvfs_mock.return_value = self._statvfs_result(512, 4)
utils.fallocate(88, 1023)
def test_not_enough_space_small_blocks(self, sys_fallocate_mock,
fstatvfs_mock):
# Want 1024 bytes in reserve plus 1024 allocated, and have 4 blocks
# of size 512 free, so fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1024')
fstatvfs_mock.return_value = self._statvfs_result(512, 4)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 1024)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
% errno.ENOSPC)
sys_fallocate_mock.assert_not_called()
def test_free_space_under_reserve(self, sys_fallocate_mock, fstatvfs_mock):
# Want 2048 bytes in reserve but have only 3 blocks of size 512, so
# allocating even 0 bytes fails
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('2048')
fstatvfs_mock.return_value = self._statvfs_result(512, 3)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 0)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1536 <= 2048'
% errno.ENOSPC)
sys_fallocate_mock.assert_not_called()
def test_all_reserved(self, sys_fallocate_mock, fstatvfs_mock):
# Filesystem is empty, but our reserve is bigger than the
# filesystem, so any allocation will fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('9999999999999')
fstatvfs_mock.return_value = self._statvfs_result(1024, 100)
self.assertRaises(OSError, utils.fallocate, 88, 0)
sys_fallocate_mock.assert_not_called()
def test_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock):
# Want 1% reserved, filesystem has 3/100 blocks of size 1024 free
# and file size is 2047, so succeed
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1%')
fstatvfs_mock.return_value = self._statvfs_result(1024, 3)
utils.fallocate(88, 2047)
def test_not_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock):
# Want 1% reserved, filesystem has 3/100 blocks of size 1024 free
# and file size is 2048, so fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('1%')
fstatvfs_mock.return_value = self._statvfs_result(1024, 3)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 2048)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 1 <= 1'
% errno.ENOSPC)
sys_fallocate_mock.assert_not_called()
def test_all_space_reserved_pct(self, sys_fallocate_mock, fstatvfs_mock):
# Filesystem is empty, but our reserve is the whole filesystem, so
# any allocation will fail
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value('100%')
fstatvfs_mock.return_value = self._statvfs_result(1024, 100)
with self.assertRaises(OSError) as catcher:
utils.fallocate(88, 0)
self.assertEqual(
str(catcher.exception),
'[Errno %d] FALLOCATE_RESERVE fail 100 <= 100'
% errno.ENOSPC)
sys_fallocate_mock.assert_not_called()
@patch('ctypes.get_errno')
@patch.object(utils, '_sys_fallocate')
class TestPunchHole(unittest.TestCase):
def test_punch_hole(self, sys_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = 0
utils.punch_hole(123, 456, 789)
calls = sys_fallocate_mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1]
self.assertEqual(len(args), 4)
self.assertEqual(args[0], 123)
self.assertEqual(
args[1], utils.FALLOC_FL_PUNCH_HOLE | utils.FALLOC_FL_KEEP_SIZE)
self.assertEqual(args[2].value, 456)
self.assertEqual(args[3].value, 789)
def test_error(self, sys_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = -1
get_errno_mock.return_value = errno.EISDIR
with self.assertRaises(OSError) as cm:
utils.punch_hole(123, 456, 789)
self.assertEqual(cm.exception.errno, errno.EISDIR)
def test_arg_bounds(self, sys_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = True
sys_fallocate_mock.return_value = 0
with self.assertRaises(ValueError):
utils.punch_hole(0, 1, -1)
with self.assertRaises(ValueError):
utils.punch_hole(0, 1 << 64, 1)
with self.assertRaises(ValueError):
utils.punch_hole(0, -1, 1)
with self.assertRaises(ValueError):
utils.punch_hole(0, 1, 0)
with self.assertRaises(ValueError):
utils.punch_hole(0, 1, 1 << 64)
self.assertEqual([], sys_fallocate_mock.mock_calls)
# sanity check
utils.punch_hole(0, 0, 1)
self.assertEqual(
[mock.call(
0, utils.FALLOC_FL_PUNCH_HOLE | utils.FALLOC_FL_KEEP_SIZE,
mock.ANY, mock.ANY)],
sys_fallocate_mock.mock_calls)
# Go confirm the ctypes values separately; apparently == doesn't
# work the way you'd expect with ctypes :-/
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 1)
def test_no_fallocate(self, sys_fallocate_mock, get_errno_mock):
sys_fallocate_mock.available = False
with self.assertRaises(OSError) as cm:
utils.punch_hole(123, 456, 789)
self.assertEqual(cm.exception.errno, errno.ENOTSUP)
class TestPunchHoleReally(unittest.TestCase):
def setUp(self):
if not utils._sys_fallocate.available:
raise unittest.SkipTest("utils._sys_fallocate not available")
def test_punch_a_hole(self):
with TemporaryFile() as tf:
tf.write(b"x" * 64 + b"y" * 64 + b"z" * 64)
tf.flush()
# knock out the first half of the "y"s
utils.punch_hole(tf.fileno(), 64, 32)
tf.seek(0)
contents = tf.read(4096)
self.assertEqual(
contents,
b"x" * 64 + b"\0" * 32 + b"y" * 32 + b"z" * 64)
class TestWatchdog(unittest.TestCase):
def test_start_stop(self):
w = utils.Watchdog()
w._evt.send = mock.Mock(side_effect=w._evt.send)
gth = object()
now = time.time()
timeout_value = 1.0
        with patch('eventlet.greenthread.getcurrent', return_value=gth), \
patch('time.time', return_value=now):
            # On the first call, _next_expiration is None, so it should
            # unblock a greenthread that would otherwise block forever
key = w.start(timeout_value, Timeout)
self.assertIn(key, w._timeouts)
self.assertEqual(w._timeouts[key], (
timeout_value, now + timeout_value, gth, Timeout, now))
w._evt.send.assert_called_once()
w.stop(key)
self.assertNotIn(key, w._timeouts)
def test_timeout_concurrency(self):
w = utils.Watchdog()
w._evt.send = mock.Mock(side_effect=w._evt.send)
w._evt.wait = mock.Mock()
gth = object()
w._run()
w._evt.wait.assert_called_once_with(None)
with patch('eventlet.greenthread.getcurrent', return_value=gth):
w._evt.send.reset_mock()
w._evt.wait.reset_mock()
with patch('time.time', return_value=10.00):
                # On the first call, _next_expiration is None, so it should
                # unblock a greenthread that would otherwise block forever
w.start(5.0, Timeout) # Will end at 15.0
w._evt.send.assert_called_once()
with patch('time.time', return_value=10.01):
w._run()
self.assertEqual(15.0, w._next_expiration)
w._evt.wait.assert_called_once_with(15.0 - 10.01)
w._evt.send.reset_mock()
w._evt.wait.reset_mock()
with patch('time.time', return_value=12.00):
                # Now _next_expiration is 15.0; it won't unblock the
                # greenthread because this new expiration is later
w.start(5.0, Timeout) # Will end at 17.0
w._evt.send.assert_not_called()
w._evt.send.reset_mock()
w._evt.wait.reset_mock()
with patch('time.time', return_value=14.00):
                # Now _next_expiration is still 15.0; it will unblock the
                # greenthread because this new expiration is 14.5
w.start(0.5, Timeout) # Will end at 14.5
w._evt.send.assert_called_once()
with patch('time.time', return_value=14.01):
w._run()
w._evt.wait.assert_called_once_with(14.5 - 14.01)
self.assertEqual(14.5, w._next_expiration)
# Should wakeup at 14.5
def test_timeout_expire(self):
w = utils.Watchdog()
        w._evt.send = mock.Mock()  # mocked so it does not call get_hub()
        w._evt.wait = mock.Mock()  # mocked so it does not call get_hub()
with patch('eventlet.hubs.get_hub') as m_gh:
with patch('time.time', return_value=10.0):
w.start(5.0, Timeout) # Will end at 15.0
with patch('time.time', return_value=16.0):
w._run()
m_gh.assert_called_once()
m_gh.return_value.schedule_call_global.assert_called_once()
exc = m_gh.return_value.schedule_call_global.call_args[0][2]
self.assertIsInstance(exc, Timeout)
self.assertEqual(exc.seconds, 5.0)
self.assertEqual(None, w._next_expiration)
w._evt.wait.assert_called_once_with(None)
class TestReiterate(unittest.TestCase):
def test_reiterate_consumes_first(self):
test_iter = FakeIterable([1, 2, 3])
reiterated = utils.reiterate(test_iter)
self.assertEqual(1, test_iter.next_call_count)
self.assertEqual(1, next(reiterated))
self.assertEqual(1, test_iter.next_call_count)
self.assertEqual(2, next(reiterated))
self.assertEqual(2, test_iter.next_call_count)
self.assertEqual(3, next(reiterated))
self.assertEqual(3, test_iter.next_call_count)
def test_reiterate_closes(self):
test_iter = FakeIterable([1, 2, 3])
self.assertEqual(0, test_iter.close_call_count)
reiterated = utils.reiterate(test_iter)
self.assertEqual(0, test_iter.close_call_count)
self.assertTrue(hasattr(reiterated, 'close'))
self.assertTrue(callable(reiterated.close))
reiterated.close()
self.assertEqual(1, test_iter.close_call_count)
# empty iter gets closed when reiterated
test_iter = FakeIterable([])
self.assertEqual(0, test_iter.close_call_count)
reiterated = utils.reiterate(test_iter)
self.assertFalse(hasattr(reiterated, 'close'))
self.assertEqual(1, test_iter.close_call_count)
def test_reiterate_list_or_tuple(self):
test_list = [1, 2]
reiterated = utils.reiterate(test_list)
self.assertIs(test_list, reiterated)
test_tuple = (1, 2)
reiterated = utils.reiterate(test_tuple)
self.assertIs(test_tuple, reiterated)
class TestCloseableChain(unittest.TestCase):
def test_closeable_chain_iterates(self):
test_iter1 = FakeIterable([1])
test_iter2 = FakeIterable([2, 3])
chain = utils.CloseableChain(test_iter1, test_iter2)
self.assertEqual([1, 2, 3], [x for x in chain])
chain = utils.CloseableChain([1, 2], [3])
self.assertEqual([1, 2, 3], [x for x in chain])
def test_closeable_chain_closes(self):
test_iter1 = FakeIterable([1])
test_iter2 = FakeIterable([2, 3])
chain = utils.CloseableChain(test_iter1, test_iter2)
self.assertEqual(0, test_iter1.close_call_count)
self.assertEqual(0, test_iter2.close_call_count)
chain.close()
self.assertEqual(1, test_iter1.close_call_count)
self.assertEqual(1, test_iter2.close_call_count)
# check that close is safe to call even when component iters have no
# close
chain = utils.CloseableChain([1, 2], [3])
chain.close()
self.assertEqual([1, 2, 3], [x for x in chain])
# check with generator in the chain
generator_closed = [False]
def gen():
try:
yield 2
yield 3
except GeneratorExit:
generator_closed[0] = True
raise
test_iter1 = FakeIterable([1])
chain = utils.CloseableChain(test_iter1, gen())
self.assertEqual(0, test_iter1.close_call_count)
self.assertFalse(generator_closed[0])
chain.close()
self.assertEqual(1, test_iter1.close_call_count)
# Generator never kicked off, so there's no GeneratorExit
self.assertFalse(generator_closed[0])
test_iter1 = FakeIterable([1])
chain = utils.CloseableChain(gen(), test_iter1)
self.assertEqual(2, next(chain)) # Kick off the generator
self.assertEqual(0, test_iter1.close_call_count)
self.assertFalse(generator_closed[0])
chain.close()
self.assertEqual(1, test_iter1.close_call_count)
self.assertTrue(generator_closed[0])
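# CooperativeIterator (exercised below) wraps an iterator and calls sleep()
# every `period` items (default 5, per test_next) so that long-running
# iteration periodically yields to other greenthreads.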
class TestCooperativeIterator(unittest.TestCase):
def test_init(self):
wrapped = itertools.count()
it = utils.CooperativeIterator(wrapped, period=3)
self.assertIs(wrapped, it.wrapped_iter)
self.assertEqual(0, it.count)
self.assertEqual(3, it.period)
def test_iter(self):
it = utils.CooperativeIterator(itertools.count())
actual = []
with mock.patch('swift.common.utils.sleep') as mock_sleep:
for i in it:
if i >= 100:
break
actual.append(i)
self.assertEqual(list(range(100)), actual)
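        # default period is 5: 100 items -> 100 // 5 = 20 sleeps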
self.assertEqual(20, mock_sleep.call_count)
def test_close(self):
it = utils.CooperativeIterator(range(5))
it.close()
closeable = mock.MagicMock()
closeable.close = mock.MagicMock()
it = utils.CooperativeIterator(closeable)
it.close()
self.assertTrue(closeable.close.called)
def test_next(self):
def do_test(it, period):
results = []
for i in range(period):
with mock.patch('swift.common.utils.sleep') as mock_sleep:
results.append(next(it))
self.assertFalse(mock_sleep.called, i)
with mock.patch('swift.common.utils.sleep') as mock_sleep:
results.append(next(it))
self.assertTrue(mock_sleep.called)
for i in range(period - 1):
with mock.patch('swift.common.utils.sleep') as mock_sleep:
results.append(next(it))
self.assertFalse(mock_sleep.called, i)
with mock.patch('swift.common.utils.sleep') as mock_sleep:
results.append(next(it))
self.assertTrue(mock_sleep.called)
return results
actual = do_test(utils.CooperativeIterator(itertools.count()), 5)
self.assertEqual(list(range(11)), actual)
actual = do_test(utils.CooperativeIterator(itertools.count(), 5), 5)
self.assertEqual(list(range(11)), actual)
actual = do_test(utils.CooperativeIterator(itertools.count(), 3), 3)
self.assertEqual(list(range(7)), actual)
actual = do_test(utils.CooperativeIterator(itertools.count(), 1), 1)
self.assertEqual(list(range(3)), actual)
actual = do_test(utils.CooperativeIterator(itertools.count(), 0), 0)
self.assertEqual(list(range(2)), actual)
class TestContextPool(unittest.TestCase):
def test_context_manager(self):
size = 5
pool = utils.ContextPool(size)
with pool:
for _ in range(size):
pool.spawn(eventlet.sleep, 10)
self.assertEqual(pool.running(), size)
self.assertEqual(pool.running(), 0)
def test_close(self):
size = 10
pool = utils.ContextPool(size)
for _ in range(size):
pool.spawn(eventlet.sleep, 10)
self.assertEqual(pool.running(), size)
pool.close()
self.assertEqual(pool.running(), 0)
| swift-master | test/unit/common/test_utils.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from contextlib import contextmanager
import eventlet
import os
import logging
import errno
import math
import time
from shutil import rmtree, copy
from tempfile import mkdtemp, NamedTemporaryFile
import json
import mock
from mock import patch, call
import six
from six.moves import reload_module
from swift.container.backend import DATADIR
from swift.common import db_replicator
from swift.common.utils import (normalize_timestamp, hash_path,
storage_directory, Timestamp)
from swift.common.exceptions import DriveNotMounted
from swift.common.swob import HTTPException
from test import unit
from test.debug_logger import debug_logger
from test.unit import attach_fake_replication_rpc
from test.unit.common.test_db import ExampleBroker
TEST_ACCOUNT_NAME = 'a c t'
TEST_CONTAINER_NAME = 'c o n'
def teardown_module():
"clean up my monkey patching"
reload_module(db_replicator)
@contextmanager
def lock_parent_directory(filename):
yield True
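# A ring with no devices: every lookup maps to partition 0 with no nodes.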
class FakeRing(object):
class Ring(object):
devs = []
def __init__(self, path, reload_time=15, ring_name=None):
pass
def get_part(self, account, container=None, obj=None):
return 0
def get_part_nodes(self, part):
return []
def get_more_nodes(self, *args):
return []
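# A ring with one device that serves as both primary and handoff.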
class FakeRingWithSingleNode(object):
class Ring(object):
devs = [dict(
id=1, weight=10.0, zone=1, ip='1.1.1.1', port=6200, device='sdb',
meta='', replication_ip='1.1.1.1', replication_port=6200, region=1
)]
def __init__(self, path, reload_time=15, ring_name=None):
pass
def get_part(self, account, container=None, obj=None):
return 0
def get_part_nodes(self, part):
return self.devs
def get_more_nodes(self, *args):
return (d for d in self.devs)
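# Six devices across two regions: the first three are primaries, the
# remaining three are offered as handoffs.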
class FakeRingWithNodes(object):
class Ring(object):
devs = [dict(
id=1, weight=10.0, zone=1, ip='1.1.1.1', port=6200, device='sdb',
meta='', replication_ip='1.1.1.1', replication_port=6200, region=1
), dict(
id=2, weight=10.0, zone=2, ip='1.1.1.2', port=6200, device='sdb',
meta='', replication_ip='1.1.1.2', replication_port=6200, region=2
), dict(
id=3, weight=10.0, zone=3, ip='1.1.1.3', port=6200, device='sdb',
meta='', replication_ip='1.1.1.3', replication_port=6200, region=1
), dict(
id=4, weight=10.0, zone=4, ip='1.1.1.4', port=6200, device='sdb',
meta='', replication_ip='1.1.1.4', replication_port=6200, region=2
), dict(
id=5, weight=10.0, zone=5, ip='1.1.1.5', port=6200, device='sdb',
meta='', replication_ip='1.1.1.5', replication_port=6200, region=1
), dict(
id=6, weight=10.0, zone=6, ip='1.1.1.6', port=6200, device='sdb',
meta='', replication_ip='1.1.1.6', replication_port=6200, region=2
)]
def __init__(self, path, reload_time=15, ring_name=None):
pass
def get_part(self, account, container=None, obj=None):
return 0
def get_part_nodes(self, part):
return self.devs[:3]
def get_more_nodes(self, *args):
return (d for d in self.devs[3:])
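# Stands in for subprocess.Popen: each call returns an object whose
# communicate() yields the next canned return code, or raises it if it
# is an exception.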
class FakeProcess(object):
def __init__(self, *codes):
self.codes = iter(codes)
self.args = None
self.kwargs = None
def __call__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class Failure(object):
def communicate(innerself):
next_item = next(self.codes)
if isinstance(next_item, int):
innerself.returncode = next_item
return next_item
raise next_item
return Failure()
@contextmanager
def _mock_process(*args):
orig_process = db_replicator.subprocess.Popen
db_replicator.subprocess.Popen = FakeProcess(*args)
yield db_replicator.subprocess.Popen
db_replicator.subprocess.Popen = orig_process
class ReplHttp(object):
def __init__(self, response=None, set_status=200):
if isinstance(response, six.text_type):
response = response.encode('ascii')
self.response = response
self.set_status = set_status
replicated = False
host = 'localhost'
node = {
'ip': '127.0.0.1',
'port': '6000',
'device': 'sdb',
}
def replicate(self, *args):
self.replicated = True
class Response(object):
status = self.set_status
data = self.response
def read(innerself):
return self.response
return Response()
class ChangingMtimesOs(object):
def __init__(self):
self.mtime = 0
def __call__(self, *args, **kwargs):
self.mtime += 1
return self.mtime
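# Minimal broker stub: fixed replication info (max_row=99, hash=12345),
# canned rows from get_items_since, and merge_* calls record their args.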
class FakeBroker(object):
db_file = __file__
get_repl_missing_table = False
stub_replication_info = None
db_type = 'container'
db_contains_type = 'object'
info = {'account': TEST_ACCOUNT_NAME, 'container': TEST_CONTAINER_NAME}
def __init__(self, *args, **kwargs):
self.locked = False
self.metadata = {}
return None
@contextmanager
def lock(self):
self.locked = True
yield True
self.locked = False
def get_sync(self, *args, **kwargs):
return 5
def get_syncs(self):
return []
def get_items_since(self, point, *args):
if point == 0:
return [{'ROWID': 1}]
if point == -1:
return [{'ROWID': 1}, {'ROWID': 2}]
return []
def merge_syncs(self, *args, **kwargs):
self.args = args
def merge_items(self, *args):
self.args = args
def get_replication_info(self):
if self.get_repl_missing_table:
raise Exception('no such table')
info = dict(self.info)
info.update({
'hash': 12345,
'delete_timestamp': 0,
'put_timestamp': 1,
'created_at': 1,
'count': 0,
'max_row': 99,
'id': 'ID',
'metadata': {}
})
if self.stub_replication_info:
info.update(self.stub_replication_info)
return info
def get_max_row(self, table=None):
return self.get_replication_info()['max_row']
def is_reclaimable(self, now, reclaim_age):
info = self.get_replication_info()
return info['count'] == 0 and (
(now - reclaim_age) >
info['delete_timestamp'] >
info['put_timestamp'])
def get_other_replication_items(self):
return None
def reclaim(self, item_timestamp, sync_timestamp):
pass
def newid(self, remote_d):
pass
def update_metadata(self, metadata):
self.metadata = metadata
def merge_timestamps(self, created_at, put_timestamp, delete_timestamp):
self.created_at = created_at
self.put_timestamp = put_timestamp
self.delete_timestamp = delete_timestamp
def get_brokers(self):
return [self]
class FakeAccountBroker(FakeBroker):
db_type = 'account'
db_contains_type = 'container'
info = {'account': TEST_ACCOUNT_NAME}
class ConcreteReplicator(db_replicator.Replicator):
server_type = 'container'
ring_file = 'container.ring.gz'
brokerclass = FakeBroker
datadir = DATADIR
default_port = 1000
class TestDBReplicator(unittest.TestCase):
def setUp(self):
db_replicator.ring = FakeRing()
self.delete_db_calls = []
self._patchers = []
# recon cache path
self.recon_cache = mkdtemp()
rmtree(self.recon_cache, ignore_errors=1)
os.mkdir(self.recon_cache)
self.logger = debug_logger('test-replicator')
def tearDown(self):
for patcher in self._patchers:
patcher.stop()
rmtree(self.recon_cache, ignore_errors=1)
def _patch(self, patching_fn, *args, **kwargs):
patcher = patching_fn(*args, **kwargs)
patched_thing = patcher.start()
self._patchers.append(patcher)
return patched_thing
def stub_delete_db(self, broker):
self.delete_db_calls.append('/path/to/file')
return True
def test_creation(self):
# later config should be extended to assert more config options
replicator = ConcreteReplicator({'node_timeout': '3.5'})
self.assertEqual(replicator.node_timeout, 3.5)
self.assertEqual(replicator.databases_per_second, 50.0)
replicator = ConcreteReplicator({'databases_per_second': '0.1'})
self.assertEqual(replicator.node_timeout, 10)
self.assertEqual(replicator.databases_per_second, 0.1)
def test_repl_connection(self):
node = {'replication_ip': '127.0.0.1', 'replication_port': 80,
'device': 'sdb1'}
conn = db_replicator.ReplConnection(node, '1234567890', 'abcdefg',
logging.getLogger())
def req(method, path, body, headers):
self.assertEqual(method, 'REPLICATE')
self.assertEqual(headers['Content-Type'], 'application/json')
class Resp(object):
def read(self):
return 'data'
resp = Resp()
conn.request = req
conn.getresponse = lambda *args: resp
self.assertEqual(conn.replicate(1, 2, 3), resp)
def other_req(method, path, body, headers):
raise Exception('blah')
conn.request = other_req
class Closeable(object):
closed = False
def close(self):
self.closed = True
conn.sock = fake_sock = Closeable()
self.assertIsNone(conn.replicate(1, 2, 3))
self.assertTrue(fake_sock.closed)
self.assertEqual(None, conn.sock)
def test_rsync_file(self):
replicator = ConcreteReplicator({})
with _mock_process(-1):
self.assertEqual(
False,
replicator._rsync_file('/some/file', 'remote:/some/file'))
with _mock_process(0):
self.assertEqual(
True,
replicator._rsync_file('/some/file', 'remote:/some/file'))
def test_rsync_file_popen_args(self):
replicator = ConcreteReplicator({})
with _mock_process(0) as process:
replicator._rsync_file('/some/file', 'remote:/some_file')
exp_args = ([
'rsync', '--quiet', '--no-motd',
'--timeout=%s' % int(math.ceil(replicator.node_timeout)),
'--contimeout=%s' % int(math.ceil(replicator.conn_timeout)),
'--whole-file', '/some/file', 'remote:/some_file'],)
self.assertEqual(exp_args, process.args)
def test_rsync_file_popen_args_whole_file_false(self):
replicator = ConcreteReplicator({})
with _mock_process(0) as process:
replicator._rsync_file('/some/file', 'remote:/some_file', False)
exp_args = ([
'rsync', '--quiet', '--no-motd',
'--timeout=%s' % int(math.ceil(replicator.node_timeout)),
'--contimeout=%s' % int(math.ceil(replicator.conn_timeout)),
'/some/file', 'remote:/some_file'],)
self.assertEqual(exp_args, process.args)
def test_rsync_file_popen_args_different_region_and_rsync_compress(self):
replicator = ConcreteReplicator({})
for rsync_compress in (False, True):
replicator.rsync_compress = rsync_compress
for different_region in (False, True):
with _mock_process(0) as process:
replicator._rsync_file('/some/file', 'remote:/some_file',
False, different_region)
if rsync_compress and different_region:
# --compress arg should be passed to rsync binary
# only when rsync_compress option is enabled
# AND destination node is in a different
# region
self.assertTrue('--compress' in process.args[0])
else:
self.assertFalse('--compress' in process.args[0])
def test_rsync_db(self):
replicator = ConcreteReplicator({})
replicator._rsync_file = lambda *args, **kwargs: True
fake_device = {'replication_ip': '127.0.0.1', 'device': 'sda1'}
replicator._rsync_db(FakeBroker(), fake_device, ReplHttp(), 'abcd')
def test_rsync_db_rsync_file_call(self):
fake_device = {'ip': '127.0.0.1', 'port': '0',
'replication_ip': '127.0.0.1', 'replication_port': '0',
'device': 'sda1'}
class MyTestReplicator(ConcreteReplicator):
def __init__(self, db_file, remote_file):
super(MyTestReplicator, self).__init__({})
self.db_file = db_file
self.remote_file = remote_file
def _rsync_file(self_, db_file, remote_file, whole_file=True,
different_region=False):
self.assertEqual(self_.db_file, db_file)
self.assertEqual(self_.remote_file, remote_file)
self_._rsync_file_called = True
return False
broker = FakeBroker()
remote_file = '127.0.0.1::container/sda1/tmp/abcd'
replicator = MyTestReplicator(broker.db_file, remote_file)
replicator._rsync_db(broker, fake_device, ReplHttp(), 'abcd')
self.assertTrue(replicator._rsync_file_called)
def test_rsync_db_rsync_file_failure(self):
class MyTestReplicator(ConcreteReplicator):
def __init__(self):
super(MyTestReplicator, self).__init__({})
self._rsync_file_called = False
def _rsync_file(self_, *args, **kwargs):
self.assertEqual(
False, self_._rsync_file_called,
'_rsync_file() should only be called once')
self_._rsync_file_called = True
return False
with patch('os.path.exists', lambda *args: True):
replicator = MyTestReplicator()
fake_device = {'ip': '127.0.0.1', 'replication_ip': '127.0.0.1',
'device': 'sda1'}
replicator._rsync_db(FakeBroker(), fake_device, ReplHttp(), 'abcd')
self.assertEqual(True, replicator._rsync_file_called)
def test_rsync_db_change_after_sync(self):
class MyTestReplicator(ConcreteReplicator):
def __init__(self, broker):
super(MyTestReplicator, self).__init__({})
self.broker = broker
self._rsync_file_call_count = 0
def _rsync_file(self_, db_file, remote_file, whole_file=True,
different_region=False):
self_._rsync_file_call_count += 1
if self_._rsync_file_call_count == 1:
self.assertEqual(True, whole_file)
self.assertEqual(False, self_.broker.locked)
elif self_._rsync_file_call_count == 2:
self.assertEqual(False, whole_file)
self.assertEqual(True, self_.broker.locked)
else:
raise RuntimeError('_rsync_file() called too many times')
return True
# with journal file
with patch('os.path.exists', lambda *args: True):
broker = FakeBroker()
replicator = MyTestReplicator(broker)
fake_device = {'ip': '127.0.0.1', 'replication_ip': '127.0.0.1',
'device': 'sda1'}
replicator._rsync_db(broker, fake_device, ReplHttp(), 'abcd')
self.assertEqual(2, replicator._rsync_file_call_count)
# with new mtime
with patch('os.path.exists', lambda *args: False):
with patch('os.path.getmtime', ChangingMtimesOs()):
broker = FakeBroker()
replicator = MyTestReplicator(broker)
fake_device = {'ip': '127.0.0.1',
'replication_ip': '127.0.0.1',
'device': 'sda1'}
replicator._rsync_db(broker, fake_device, ReplHttp(), 'abcd')
self.assertEqual(2, replicator._rsync_file_call_count)
def test_in_sync(self):
replicator = ConcreteReplicator({})
self.assertEqual(replicator._in_sync(
{'id': 'a', 'point': 0, 'max_row': 0, 'hash': 'b'},
{'id': 'a', 'point': -1, 'max_row': 0, 'hash': 'b'},
FakeBroker(), -1), True)
self.assertEqual(replicator._in_sync(
{'id': 'a', 'point': -1, 'max_row': 0, 'hash': 'b'},
{'id': 'a', 'point': -1, 'max_row': 10, 'hash': 'b'},
FakeBroker(), -1), True)
self.assertEqual(bool(replicator._in_sync(
{'id': 'a', 'point': -1, 'max_row': 0, 'hash': 'c'},
{'id': 'a', 'point': -1, 'max_row': 10, 'hash': 'd'},
FakeBroker(), -1)), False)
def test_run_once_no_local_device_in_ring(self):
replicator = ConcreteReplicator({'recon_cache_path': self.recon_cache},
logger=self.logger)
with patch('swift.common.db_replicator.whataremyips',
return_value=['127.0.0.1']):
replicator.run_once()
expected = [
"Can't find itself 127.0.0.1 with port 1000 "
"in ring file, not replicating",
]
self.assertEqual(expected, self.logger.get_lines_for_level('error'))
def test_run_once_with_local_device_in_ring(self):
base = 'swift.common.db_replicator.'
with patch(base + 'whataremyips', return_value=['1.1.1.1']), \
patch(base + 'ring', FakeRingWithNodes()):
replicator = ConcreteReplicator({
'bind_port': 6200,
'recon_cache_path': self.recon_cache
}, logger=self.logger)
replicator.run_once()
self.assertFalse(self.logger.get_lines_for_level('error'))
def test_run_once_no_ips(self):
replicator = ConcreteReplicator({}, logger=self.logger)
self._patch(patch.object, db_replicator, 'whataremyips',
lambda *a, **kw: [])
replicator.run_once()
self.assertEqual(
replicator.logger.get_lines_for_level('error'),
['ERROR Failed to get my own IPs?'])
def test_run_once_node_is_not_mounted(self):
db_replicator.ring = FakeRingWithSingleNode()
# If a bind_ip is specified, it's plumbed into whataremyips() and
# returned as the only IP.
conf = {'mount_check': 'true', 'bind_ip': '1.1.1.1',
'bind_port': 6200}
replicator = ConcreteReplicator(conf, logger=self.logger)
self.assertEqual(replicator.mount_check, True)
self.assertEqual(replicator.port, 6200)
err = ValueError('Boom!')
def mock_check_drive(root, device, mount_check):
self.assertEqual(root, replicator.root)
self.assertEqual(device, replicator.ring.devs[0]['device'])
self.assertEqual(mount_check, True)
raise err
self._patch(patch.object, db_replicator, 'check_drive',
mock_check_drive)
replicator.run_once()
self.assertEqual(
replicator.logger.get_lines_for_level('warning'),
['Skipping: %s' % (err,)])
def test_run_once_node_is_mounted(self):
db_replicator.ring = FakeRingWithSingleNode()
conf = {'mount_check': 'true', 'bind_port': 6200}
replicator = ConcreteReplicator(conf, logger=self.logger)
self.assertEqual(replicator.mount_check, True)
self.assertEqual(replicator.port, 6200)
def mock_unlink_older_than(path, mtime):
self.assertEqual(path,
os.path.join(replicator.root,
replicator.ring.devs[0]['device'],
'tmp'))
self.assertTrue(time.time() - replicator.reclaim_age >= mtime)
def mock_spawn_n(fn, part, object_file, node_id):
self.assertEqual('123', part)
self.assertEqual('/srv/node/sda/c.db', object_file)
self.assertEqual(1, node_id)
self._patch(patch.object, db_replicator, 'whataremyips',
lambda *a, **kw: ['1.1.1.1'])
self._patch(patch.object, db_replicator, 'unlink_older_than',
mock_unlink_older_than)
self._patch(patch.object, db_replicator, 'roundrobin_datadirs',
lambda *args: [('123', '/srv/node/sda/c.db', 1)])
self._patch(patch.object, replicator.cpool, 'spawn_n', mock_spawn_n)
with patch('swift.common.db_replicator.os',
new=mock.MagicMock(wraps=os)) as mock_os, \
unit.mock_check_drive(ismount=True) as mocks:
mock_os.path.isdir.return_value = True
replicator.run_once()
mock_os.path.isdir.assert_called_with(
os.path.join(replicator.root,
replicator.ring.devs[0]['device'],
replicator.datadir))
self.assertEqual([
mock.call(os.path.join(
replicator.root,
replicator.ring.devs[0]['device'])),
], mocks['ismount'].call_args_list)
def test_usync(self):
fake_http = ReplHttp()
replicator = ConcreteReplicator({})
replicator._usync_db(0, FakeBroker(), fake_http, '12345', '67890')
def test_usync_http_error_above_300(self):
fake_http = ReplHttp(set_status=301)
replicator = ConcreteReplicator({})
self.assertFalse(
replicator._usync_db(0, FakeBroker(), fake_http, '12345', '67890'))
def test_usync_http_error_below_200(self):
fake_http = ReplHttp(set_status=101)
replicator = ConcreteReplicator({})
self.assertFalse(
replicator._usync_db(0, FakeBroker(), fake_http, '12345', '67890'))
@mock.patch('swift.common.db_replicator.dump_recon_cache')
@mock.patch('swift.common.db_replicator.time.time', return_value=1234.5678)
def test_stats(self, mock_time, mock_recon_cache):
replicator = ConcreteReplicator({}, logger=self.logger)
replicator._zero_stats()
self.assertEqual(replicator.stats['start'], mock_time.return_value)
replicator._report_stats()
self.assertEqual(self.logger.get_lines_for_level('info'), [
'Attempted to replicate 0 dbs in 0.00000 seconds (0.00000/s)',
'Removed 0 dbs',
'0 successes, 0 failures',
'diff:0 diff_capped:0 empty:0 hashmatch:0 no_change:0 '
'remote_merge:0 rsync:0 ts_repl:0',
])
self.assertEqual(1, len(mock_recon_cache.mock_calls))
self.assertEqual(mock_recon_cache.mock_calls[0][1][0], {
'replication_time': 0.0,
'replication_last': mock_time.return_value,
'replication_stats': replicator.stats,
})
mock_recon_cache.reset_mock()
self.logger.clear()
replicator.stats.update({
'attempted': 30,
'success': 25,
'remove': 9,
'failure': 1,
'diff': 5,
'diff_capped': 4,
'empty': 7,
'hashmatch': 8,
'no_change': 6,
'remote_merge': 2,
'rsync': 3,
'ts_repl': 10,
})
mock_time.return_value += 246.813576
replicator._report_stats()
self.maxDiff = None
self.assertEqual(self.logger.get_lines_for_level('info'), [
'Attempted to replicate 30 dbs in 246.81358 seconds (0.12155/s)',
'Removed 9 dbs',
'25 successes, 1 failures',
'diff:5 diff_capped:4 empty:7 hashmatch:8 no_change:6 '
'remote_merge:2 rsync:3 ts_repl:10',
])
self.assertEqual(1, len(mock_recon_cache.mock_calls))
self.assertEqual(mock_recon_cache.mock_calls[0][1][0], {
'replication_time': 246.813576,
'replication_last': mock_time.return_value,
'replication_stats': replicator.stats,
})
def test_replicate_object(self):
# verify return values from replicate_object
db_replicator.ring = FakeRingWithNodes()
db_path = '/path/to/file'
replicator = ConcreteReplicator({}, logger=self.logger)
info = FakeBroker().get_replication_info()
# make remote appear to be in sync
rinfo = {'point': info['max_row'], 'id': 'remote_id'}
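# status raises when set to an exception or timeout, mimicking a failed request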
class FakeResponse(object):
def __init__(self, status, rinfo):
self._status = status
self.data = json.dumps(rinfo).encode('ascii')
@property
def status(self):
if isinstance(self._status, (Exception, eventlet.Timeout)):
raise self._status
return self._status
# all requests fail
replicate = 'swift.common.db_replicator.ReplConnection.replicate'
with mock.patch(replicate) as fake_replicate:
fake_replicate.side_effect = [
FakeResponse(500, None),
FakeResponse(500, None),
FakeResponse(500, None)]
with mock.patch.object(replicator, 'delete_db') as mock_delete:
res = replicator._replicate_object('0', db_path, 'node_id')
self.assertRaises(StopIteration, next, fake_replicate.side_effect)
self.assertEqual((False, [False, False, False]), res)
self.assertEqual(0, mock_delete.call_count)
self.assertFalse(replicator.logger.get_lines_for_level('error'))
self.assertFalse(replicator.logger.get_lines_for_level('warning'))
replicator.logger.clear()
with mock.patch(replicate) as fake_replicate:
fake_replicate.side_effect = [
FakeResponse(Exception('ugh'), None),
FakeResponse(eventlet.Timeout(), None),
FakeResponse(200, rinfo)]
with mock.patch.object(replicator, 'delete_db') as mock_delete:
res = replicator._replicate_object('0', db_path, 'node_id')
self.assertRaises(StopIteration, next, fake_replicate.side_effect)
self.assertEqual((False, [False, False, True]), res)
self.assertEqual(0, mock_delete.call_count)
lines = replicator.logger.get_lines_for_level('error')
self.assertIn('ERROR syncing', lines[0])
self.assertIn('ERROR syncing', lines[1])
self.assertFalse(lines[2:])
self.assertFalse(replicator.logger.get_lines_for_level('warning'))
replicator.logger.clear()
# partial success
with mock.patch(replicate) as fake_replicate:
fake_replicate.side_effect = [
FakeResponse(200, rinfo),
FakeResponse(200, rinfo),
FakeResponse(500, None)]
with mock.patch.object(replicator, 'delete_db') as mock_delete:
res = replicator._replicate_object('0', db_path, 'node_id')
self.assertRaises(StopIteration, next, fake_replicate.side_effect)
self.assertEqual((False, [True, True, False]), res)
self.assertEqual(0, mock_delete.call_count)
self.assertFalse(replicator.logger.get_lines_for_level('error'))
self.assertFalse(replicator.logger.get_lines_for_level('warning'))
replicator.logger.clear()
# 507 triggers additional requests
with mock.patch(replicate) as fake_replicate:
fake_replicate.side_effect = [
FakeResponse(200, rinfo),
FakeResponse(200, rinfo),
FakeResponse(507, None),
FakeResponse(507, None),
FakeResponse(200, rinfo)]
with mock.patch.object(replicator, 'delete_db') as mock_delete:
res = replicator._replicate_object('0', db_path, 'node_id')
self.assertRaises(StopIteration, next, fake_replicate.side_effect)
self.assertEqual((False, [True, True, False, False, True]), res)
self.assertEqual(0, mock_delete.call_count)
lines = replicator.logger.get_lines_for_level('error')
self.assertIn('Remote drive not mounted', lines[0])
self.assertIn('Remote drive not mounted', lines[1])
self.assertFalse(lines[2:])
self.assertFalse(replicator.logger.get_lines_for_level('warning'))
replicator.logger.clear()
# all requests succeed; node id == 'node_id' causes node to be
# considered a handoff so expect the db to be deleted
with mock.patch(replicate) as fake_replicate:
fake_replicate.side_effect = [
FakeResponse(200, rinfo),
FakeResponse(200, rinfo),
FakeResponse(200, rinfo)]
with mock.patch.object(replicator, 'delete_db') as mock_delete:
res = replicator._replicate_object('0', db_path, 'node_id')
self.assertRaises(StopIteration, next, fake_replicate.side_effect)
self.assertEqual((True, [True, True, True]), res)
self.assertEqual(1, mock_delete.call_count)
self.assertFalse(replicator.logger.get_lines_for_level('error'))
self.assertFalse(replicator.logger.get_lines_for_level('warning'))
def test_replicate_object_quarantine(self):
replicator = ConcreteReplicator({})
self._patch(patch.object, replicator.brokerclass, 'db_file',
'/a/b/c/d/e/hey')
self._patch(patch.object, replicator.brokerclass,
'get_repl_missing_table', True)
def mock_renamer(was, new, fsync=False, cause_collision=False):
if cause_collision and '-' not in new:
raise OSError(errno.EEXIST, "File already exists")
self.assertEqual('/a/b/c/d/e', was)
if '-' in new:
self.assertTrue(
new.startswith('/a/quarantined/containers/e-'))
else:
self.assertEqual('/a/quarantined/containers/e', new)
def mock_renamer_error(was, new, fsync):
return mock_renamer(was, new, fsync, cause_collision=True)
with patch.object(db_replicator, 'renamer', mock_renamer):
replicator._replicate_object('0', 'file', 'node_id')
# try the double quarantine
with patch.object(db_replicator, 'renamer', mock_renamer_error):
replicator._replicate_object('0', 'file', 'node_id')
def test_replicate_object_delete_because_deleted(self):
replicator = ConcreteReplicator({})
try:
replicator.delete_db = self.stub_delete_db
replicator.brokerclass.stub_replication_info = {
'delete_timestamp': 2, 'put_timestamp': 1}
replicator._replicate_object('0', '/path/to/file', 'node_id')
finally:
replicator.brokerclass.stub_replication_info = None
self.assertEqual(['/path/to/file'], self.delete_db_calls)
def test_replicate_object_delete_because_not_shouldbehere(self):
replicator = ConcreteReplicator({})
replicator.ring = FakeRingWithNodes().Ring('path')
replicator.brokerclass = FakeAccountBroker
replicator._repl_to_node = lambda *args: True
replicator.delete_db = self.stub_delete_db
orig_cleanup = replicator.cleanup_post_replicate
with mock.patch.object(replicator, 'cleanup_post_replicate',
side_effect=orig_cleanup) as mock_cleanup:
replicator._replicate_object('0', '/path/to/file', 'node_id')
mock_cleanup.assert_called_once_with(mock.ANY, mock.ANY, [True] * 3)
self.assertIsInstance(mock_cleanup.call_args[0][0],
replicator.brokerclass)
self.assertEqual(['/path/to/file'], self.delete_db_calls)
self.assertEqual(0, replicator.stats['failure'])
def test_handoff_delete(self):
def do_test(config, repl_to_node_results, expect_delete):
self.delete_db_calls = []
replicator = ConcreteReplicator(config)
replicator.ring = FakeRingWithNodes().Ring('path')
replicator.brokerclass = FakeAccountBroker
mock_repl_to_node = mock.Mock()
mock_repl_to_node.side_effect = repl_to_node_results
replicator._repl_to_node = mock_repl_to_node
replicator.delete_db = self.stub_delete_db
orig_cleanup = replicator.cleanup_post_replicate
with mock.patch.object(replicator, 'cleanup_post_replicate',
side_effect=orig_cleanup) as mock_cleanup:
replicator._replicate_object('0', '/path/to/file', 'node_id')
mock_cleanup.assert_called_once_with(mock.ANY, mock.ANY,
repl_to_node_results)
self.assertIsInstance(mock_cleanup.call_args[0][0],
replicator.brokerclass)
if expect_delete:
self.assertEqual(['/path/to/file'], self.delete_db_calls)
else:
self.assertNotEqual(['/path/to/file'], self.delete_db_calls)
self.assertEqual(repl_to_node_results.count(True),
replicator.stats['success'])
self.assertEqual(repl_to_node_results.count(False),
replicator.stats['failure'])
for cfg, repl_results, expected_delete in (
# Start with the sanity check
({}, [True] * 3, True),
({}, [True, True, False], False),
({'handoff_delete': 'auto'}, [True] * 3, True),
({'handoff_delete': 'auto'}, [True, True, False], False),
({'handoff_delete': 0}, [True] * 3, True),
({'handoff_delete': 0}, [True, True, False], False),
# Now test a lower handoff delete
({'handoff_delete': 2}, [True] * 3, True),
({'handoff_delete': 2}, [True, True, False], True),
({'handoff_delete': 2}, [True, False, False], False),
({'handoff_delete': 1}, [True] * 3, True),
({'handoff_delete': 1}, [True, True, False], True),
({'handoff_delete': 1}, [True, False, False], True)):
do_test(cfg, repl_results, expected_delete)
def test_replicate_object_delete_delegated_to_cleanup_post_replicate(self):
replicator = ConcreteReplicator({})
replicator.ring = FakeRingWithNodes().Ring('path')
replicator.brokerclass = FakeAccountBroker
replicator._repl_to_node = lambda *args: True
replicator.delete_db = self.stub_delete_db
# cleanup succeeds
with mock.patch.object(replicator, 'cleanup_post_replicate',
return_value=True) as mock_cleanup:
replicator._replicate_object('0', '/path/to/file', 'node_id')
mock_cleanup.assert_called_once_with(mock.ANY, mock.ANY, [True] * 3)
self.assertIsInstance(mock_cleanup.call_args[0][0],
replicator.brokerclass)
self.assertFalse(self.delete_db_calls)
self.assertEqual(0, replicator.stats['failure'])
self.assertEqual(3, replicator.stats['success'])
# cleanup fails
replicator._zero_stats()
with mock.patch.object(replicator, 'cleanup_post_replicate',
return_value=False) as mock_cleanup:
replicator._replicate_object('0', '/path/to/file', 'node_id')
mock_cleanup.assert_called_once_with(mock.ANY, mock.ANY, [True] * 3)
self.assertIsInstance(mock_cleanup.call_args[0][0],
replicator.brokerclass)
self.assertFalse(self.delete_db_calls)
self.assertEqual(3, replicator.stats['failure'])
self.assertEqual(0, replicator.stats['success'])
# shouldbehere True - cleanup not required
replicator._zero_stats()
primary_node_id = replicator.ring.get_part_nodes('0')[0]['id']
with mock.patch.object(replicator, 'cleanup_post_replicate',
return_value=True) as mock_cleanup:
replicator._replicate_object('0', '/path/to/file', primary_node_id)
mock_cleanup.assert_not_called()
self.assertFalse(self.delete_db_calls)
self.assertEqual(0, replicator.stats['failure'])
self.assertEqual(2, replicator.stats['success'])
def test_cleanup_post_replicate(self):
replicator = ConcreteReplicator({}, logger=self.logger)
replicator.ring = FakeRingWithNodes().Ring('path')
broker = FakeBroker()
replicator._repl_to_node = lambda *args: True
info = broker.get_replication_info()
with mock.patch.object(replicator, 'delete_db') as mock_delete_db:
res = replicator.cleanup_post_replicate(
broker, info, [False] * 3)
mock_delete_db.assert_not_called()
self.assertTrue(res)
self.assertEqual(['Not deleting db %s (0/3 success)' % broker.db_file],
replicator.logger.get_lines_for_level('debug'))
replicator.logger.clear()
with mock.patch.object(replicator, 'delete_db') as mock_delete_db:
res = replicator.cleanup_post_replicate(
broker, info, [True, False, True])
mock_delete_db.assert_not_called()
self.assertTrue(res)
self.assertEqual(['Not deleting db %s (2/3 success)' % broker.db_file],
replicator.logger.get_lines_for_level('debug'))
replicator.logger.clear()
broker.stub_replication_info = {'max_row': 101}
with mock.patch.object(replicator, 'delete_db') as mock_delete_db:
res = replicator.cleanup_post_replicate(
broker, info, [True] * 3)
mock_delete_db.assert_not_called()
self.assertTrue(res)
self.assertEqual(['Not deleting db %s (2 new rows)' % broker.db_file],
replicator.logger.get_lines_for_level('debug'))
replicator.logger.clear()
broker.stub_replication_info = {'max_row': 98}
with mock.patch.object(replicator, 'delete_db') as mock_delete_db:
res = replicator.cleanup_post_replicate(
broker, info, [True] * 3)
mock_delete_db.assert_not_called()
self.assertTrue(res)
broker.stub_replication_info = None
self.assertEqual(['Not deleting db %s (negative max_row_delta: -1)' %
broker.db_file],
replicator.logger.get_lines_for_level('error'))
replicator.logger.clear()
with mock.patch.object(replicator, 'delete_db') as mock_delete_db:
res = replicator.cleanup_post_replicate(
broker, info, [True] * 3)
mock_delete_db.assert_called_once_with(broker)
self.assertTrue(res)
self.assertEqual(['Successfully deleted db %s' % broker.db_file],
replicator.logger.get_lines_for_level('debug'))
replicator.logger.clear()
with mock.patch.object(replicator, 'delete_db',
return_value=False) as mock_delete_db:
res = replicator.cleanup_post_replicate(
broker, info, [True] * 3)
mock_delete_db.assert_called_once_with(broker)
self.assertFalse(res)
self.assertEqual(['Failed to delete db %s' % broker.db_file],
replicator.logger.get_lines_for_level('debug'))
replicator.logger.clear()
def test_replicate_object_with_exception(self):
replicator = ConcreteReplicator({})
replicator.ring = FakeRingWithNodes().Ring('path')
replicator.brokerclass = FakeAccountBroker
replicator.delete_db = self.stub_delete_db
replicator._repl_to_node = mock.Mock(side_effect=Exception())
replicator._replicate_object('0', '/path/to/file',
replicator.ring.devs[0]['id'])
self.assertEqual(2, replicator._repl_to_node.call_count)
# with one DriveNotMounted exception called on +1 more replica
replicator._repl_to_node = mock.Mock(side_effect=[DriveNotMounted()])
replicator._replicate_object('0', '/path/to/file',
replicator.ring.devs[0]['id'])
self.assertEqual(3, replicator._repl_to_node.call_count)
# called on +1 more replica and self when *first* handoff
replicator._repl_to_node = mock.Mock(side_effect=[DriveNotMounted()])
replicator._replicate_object('0', '/path/to/file',
replicator.ring.devs[3]['id'])
self.assertEqual(4, replicator._repl_to_node.call_count)
# even if it's the last handoff it works to keep 3 replicas
# 2 primaries + 1 handoff
replicator._repl_to_node = mock.Mock(side_effect=[DriveNotMounted()])
replicator._replicate_object('0', '/path/to/file',
replicator.ring.devs[-1]['id'])
self.assertEqual(4, replicator._repl_to_node.call_count)
# with two DriveNotMounted exceptions, called on +2 more replicas to
# keep durability
replicator._repl_to_node = mock.Mock(
side_effect=[DriveNotMounted()] * 2)
replicator._replicate_object('0', '/path/to/file',
replicator.ring.devs[0]['id'])
self.assertEqual(4, replicator._repl_to_node.call_count)
def test_replicate_object_with_exception_run_out_of_nodes(self):
replicator = ConcreteReplicator({})
replicator.ring = FakeRingWithNodes().Ring('path')
replicator.brokerclass = FakeAccountBroker
replicator.delete_db = self.stub_delete_db
# all other devices are not mounted
replicator._repl_to_node = mock.Mock(side_effect=DriveNotMounted())
replicator._replicate_object('0', '/path/to/file',
replicator.ring.devs[0]['id'])
self.assertEqual(5, replicator._repl_to_node.call_count)
def test_replicate_account_out_of_place(self):
replicator = ConcreteReplicator({}, logger=self.logger)
replicator.ring = FakeRingWithNodes().Ring('path')
replicator.brokerclass = FakeAccountBroker
replicator._repl_to_node = lambda *args: True
replicator.delete_db = self.stub_delete_db
# Correct node_id, wrong part
part = replicator.ring.get_part(TEST_ACCOUNT_NAME) + 1
node_id = replicator.ring.get_part_nodes(part)[0]['id']
replicator._replicate_object(str(part), '/path/to/file', node_id)
self.assertEqual(['/path/to/file'], self.delete_db_calls)
error_msgs = replicator.logger.get_lines_for_level('error')
expected = 'Found /path/to/file for /a%20c%20t when it should be ' \
'on partition 0; will replicate out and remove.'
self.assertEqual(error_msgs, [expected])
def test_replicate_container_out_of_place(self):
replicator = ConcreteReplicator({}, logger=self.logger)
replicator.ring = FakeRingWithNodes().Ring('path')
replicator._repl_to_node = lambda *args: True
replicator.delete_db = self.stub_delete_db
# Correct node_id, wrong part
part = replicator.ring.get_part(
TEST_ACCOUNT_NAME, TEST_CONTAINER_NAME) + 1
node_id = replicator.ring.get_part_nodes(part)[0]['id']
replicator._replicate_object(str(part), '/path/to/file', node_id)
self.assertEqual(['/path/to/file'], self.delete_db_calls)
self.assertEqual(
replicator.logger.get_lines_for_level('error'),
['Found /path/to/file for /a%20c%20t/c%20o%20n when it should '
'be on partition 0; will replicate out and remove.'])
def test_replicate_container_out_of_place_no_node(self):
replicator = ConcreteReplicator({}, logger=self.logger)
replicator.ring = FakeRingWithSingleNode().Ring('path')
replicator._repl_to_node = lambda *args: True
replicator.delete_db = self.stub_delete_db
# Correct node_id, wrong part
part = replicator.ring.get_part(
TEST_ACCOUNT_NAME, TEST_CONTAINER_NAME) + 1
node_id = replicator.ring.get_part_nodes(part)[0]['id']
replicator._replicate_object(str(part), '/path/to/file', node_id)
self.assertEqual(['/path/to/file'], self.delete_db_calls)
self.delete_db_calls = []
# No nodes this time
replicator.ring.get_part_nodes = lambda *args: []
replicator.delete_db = self.stub_delete_db
# Correct node_id, wrong part
part = replicator.ring.get_part(
TEST_ACCOUNT_NAME, TEST_CONTAINER_NAME) + 1
replicator._replicate_object(str(part), '/path/to/file', node_id)
self.assertEqual([], self.delete_db_calls)
def test_replicate_object_different_region(self):
db_replicator.ring = FakeRingWithNodes()
replicator = ConcreteReplicator({})
replicator._repl_to_node = mock.Mock()
# For node_id = 1, one replica in same region(1) and other is in a
# different region(2). Refer: FakeRingWithNodes
replicator._replicate_object('0', '/path/to/file', 1)
# different_region was set True and passed to _repl_to_node()
self.assertEqual(replicator._repl_to_node.call_args_list[0][0][-1],
True)
# different_region was set False and passed to _repl_to_node()
self.assertEqual(replicator._repl_to_node.call_args_list[1][0][-1],
False)
def test_delete_db(self):
db_replicator.lock_parent_directory = lock_parent_directory
replicator = ConcreteReplicator({}, logger=self.logger)
replicator._zero_stats()
replicator.extract_device = lambda _: 'some_device'
temp_dir = mkdtemp()
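# build a <part>/<suffix>/<hash> layout with two hash dirs so we can check
# that empty parent dirs are only pruned once the last db is gone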
try:
temp_part_dir = os.path.join(temp_dir, '140')
os.mkdir(temp_part_dir)
temp_suf_dir = os.path.join(temp_part_dir, '16e')
os.mkdir(temp_suf_dir)
temp_hash_dir = os.path.join(temp_suf_dir,
'166e33924a08ede4204871468c11e16e')
os.mkdir(temp_hash_dir)
temp_file = NamedTemporaryFile(dir=temp_hash_dir, delete=False)
temp_hash_dir2 = os.path.join(temp_suf_dir,
'266e33924a08ede4204871468c11e16e')
os.mkdir(temp_hash_dir2)
temp_file2 = NamedTemporaryFile(dir=temp_hash_dir2, delete=False)
# sanity-checks
self.assertTrue(os.path.exists(temp_dir))
self.assertTrue(os.path.exists(temp_part_dir))
self.assertTrue(os.path.exists(temp_suf_dir))
self.assertTrue(os.path.exists(temp_hash_dir))
self.assertTrue(os.path.exists(temp_file.name))
self.assertTrue(os.path.exists(temp_hash_dir2))
self.assertTrue(os.path.exists(temp_file2.name))
self.assertEqual(0, replicator.stats['remove'])
temp_file.db_file = temp_file.name
replicator.delete_db(temp_file)
self.assertTrue(os.path.exists(temp_dir))
self.assertTrue(os.path.exists(temp_part_dir))
self.assertTrue(os.path.exists(temp_suf_dir))
self.assertFalse(os.path.exists(temp_hash_dir))
self.assertFalse(os.path.exists(temp_file.name))
self.assertTrue(os.path.exists(temp_hash_dir2))
self.assertTrue(os.path.exists(temp_file2.name))
self.assertEqual(
[(('removes.some_device',), {})],
replicator.logger.statsd_client.calls['increment'])
self.assertEqual(1, replicator.stats['remove'])
temp_file2.db_file = temp_file2.name
replicator.delete_db(temp_file2)
self.assertTrue(os.path.exists(temp_dir))
self.assertFalse(os.path.exists(temp_part_dir))
self.assertFalse(os.path.exists(temp_suf_dir))
self.assertFalse(os.path.exists(temp_hash_dir))
self.assertFalse(os.path.exists(temp_file.name))
self.assertFalse(os.path.exists(temp_hash_dir2))
self.assertFalse(os.path.exists(temp_file2.name))
self.assertEqual(
[(('removes.some_device',), {})] * 2,
replicator.logger.statsd_client.calls['increment'])
self.assertEqual(2, replicator.stats['remove'])
finally:
rmtree(temp_dir)
def test_extract_device(self):
replicator = ConcreteReplicator({'devices': '/some/root'})
self.assertEqual('some_device', replicator.extract_device(
'/some/root/some_device/deeper/and/deeper'))
self.assertEqual('UNKNOWN', replicator.extract_device(
'/some/foo/some_device/deeper/and/deeper'))
def test_dispatch_no_arg_pop(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
with unit.mock_check_drive(isdir=True):
response = rpc.dispatch(('a',), 'arg')
self.assertEqual(b'Invalid object type', response.body)
self.assertEqual(400, response.status_int)
def test_dispatch_drive_not_mounted(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=True)
with unit.mock_check_drive() as mocks:
response = rpc.dispatch(('drive', 'part', 'hash'), ['method'])
self.assertEqual([mock.call(os.path.join('/drive'))],
mocks['ismount'].call_args_list)
self.assertEqual('507 drive is not mounted', response.status)
self.assertEqual(507, response.status_int)
def test_dispatch_unexpected_operation_db_does_not_exist(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
def mock_mkdirs(path):
self.assertEqual('/drive/tmp', path)
self._patch(patch.object, db_replicator, 'mkdirs', mock_mkdirs)
with patch('swift.common.db_replicator.os',
new=mock.MagicMock(wraps=os)) as mock_os, \
unit.mock_check_drive(isdir=True):
mock_os.path.exists.return_value = False
response = rpc.dispatch(('drive', 'part', 'hash'), ['unexpected'])
self.assertEqual('404 Not Found', response.status)
self.assertEqual(404, response.status_int)
def test_dispatch_operation_unexpected(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
self._patch(patch.object, db_replicator, 'mkdirs', lambda *args: True)
def unexpected_method(broker, args):
self.assertEqual(FakeBroker, broker.__class__)
self.assertEqual(['arg1', 'arg2'], args)
return 'unexpected-called'
rpc.unexpected = unexpected_method
with patch('swift.common.db_replicator.os',
new=mock.MagicMock(wraps=os)) as mock_os, \
unit.mock_check_drive(isdir=True):
mock_os.path.exists.return_value = True
response = rpc.dispatch(('drive', 'part', 'hash'),
['unexpected', 'arg1', 'arg2'])
mock_os.path.exists.assert_called_with('/part/ash/hash/hash.db')
self.assertEqual('unexpected-called', response)
def test_dispatch_operation_rsync_then_merge(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
self._patch(patch.object, db_replicator, 'renamer', lambda *args: True)
with patch('swift.common.db_replicator.os',
new=mock.MagicMock(wraps=os)) as mock_os, \
unit.mock_check_drive(isdir=True):
mock_os.path.exists.return_value = True
response = rpc.dispatch(('drive', 'part', 'hash'),
['rsync_then_merge', 'arg1', 'arg2'])
expected_calls = [call('/part/ash/hash/hash.db'),
call('/drive/tmp/arg1'),
call(FakeBroker.db_file),
call('/drive/tmp/arg1')]
self.assertEqual(mock_os.path.exists.call_args_list,
expected_calls)
self.assertEqual('204 No Content', response.status)
self.assertEqual(204, response.status_int)
def test_dispatch_operation_complete_rsync(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
self._patch(patch.object, db_replicator, 'renamer', lambda *args: True)
with patch('swift.common.db_replicator.os',
new=mock.MagicMock(wraps=os)) as mock_os, \
unit.mock_check_drive(isdir=True):
mock_os.path.exists.side_effect = [False, True]
response = rpc.dispatch(('drive', 'part', 'hash'),
['complete_rsync', 'arg1'])
expected_calls = [call('/part/ash/hash/hash.db'),
call('/drive/tmp/arg1')]
self.assertEqual(mock_os.path.exists.call_args_list,
expected_calls)
self.assertEqual('204 No Content', response.status)
self.assertEqual(204, response.status_int)
with patch('swift.common.db_replicator.os',
new=mock.MagicMock(wraps=os)) as mock_os, \
unit.mock_check_drive(isdir=True):
mock_os.path.exists.side_effect = [False, True]
response = rpc.dispatch(('drive', 'part', 'hash'),
['complete_rsync', 'arg1', 'arg2'])
expected_calls = [call('/part/ash/hash/arg2'),
call('/drive/tmp/arg1')]
self.assertEqual(mock_os.path.exists.call_args_list,
expected_calls)
self.assertEqual('204 No Content', response.status)
self.assertEqual(204, response.status_int)
def test_rsync_then_merge_db_does_not_exist(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
with patch('swift.common.db_replicator.os',
new=mock.MagicMock(wraps=os)) as mock_os, \
unit.mock_check_drive(isdir=True):
mock_os.path.exists.return_value = False
response = rpc.rsync_then_merge('drive', '/data/db.db',
('arg1', 'arg2'))
mock_os.path.exists.assert_called_with('/data/db.db')
self.assertEqual('404 Not Found', response.status)
self.assertEqual(404, response.status_int)
def test_rsync_then_merge_old_does_not_exist(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
with patch('swift.common.db_replicator.os',
new=mock.MagicMock(wraps=os)) as mock_os, \
unit.mock_check_drive(isdir=True):
mock_os.path.exists.side_effect = [True, False]
response = rpc.rsync_then_merge('drive', '/data/db.db',
('arg1', 'arg2'))
expected_calls = [call('/data/db.db'), call('/drive/tmp/arg1')]
self.assertEqual(mock_os.path.exists.call_args_list,
expected_calls)
self.assertEqual('404 Not Found', response.status)
self.assertEqual(404, response.status_int)
def test_rsync_then_merge_with_objects(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
def mock_renamer(old, new):
self.assertEqual('/drive/tmp/arg1', old)
# FakeBroker uses module filename as db_file!
self.assertEqual(__file__, new)
self._patch(patch.object, db_replicator, 'renamer', mock_renamer)
with patch('swift.common.db_replicator.os',
new=mock.MagicMock(wraps=os)) as mock_os, \
unit.mock_check_drive(isdir=True):
mock_os.path.exists.return_value = True
response = rpc.rsync_then_merge('drive', '/data/db.db',
['arg1', 'arg2'])
self.assertEqual('204 No Content', response.status)
self.assertEqual(204, response.status_int)
def test_complete_rsync_db_exists(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
with patch('swift.common.db_replicator.os',
new=mock.MagicMock(wraps=os)) as mock_os, \
unit.mock_check_drive(isdir=True):
mock_os.path.exists.return_value = True
response = rpc.complete_rsync('drive', '/data/db.db', ['arg1'])
mock_os.path.exists.assert_called_with('/data/db.db')
self.assertEqual('404 Not Found', response.status)
self.assertEqual(404, response.status_int)
with patch('swift.common.db_replicator.os',
new=mock.MagicMock(wraps=os)) as mock_os, \
unit.mock_check_drive(isdir=True):
mock_os.path.exists.return_value = True
response = rpc.complete_rsync('drive', '/data/db.db',
['arg1', 'arg2'])
mock_os.path.exists.assert_called_with('/data/arg2')
self.assertEqual('404 Not Found', response.status)
self.assertEqual(404, response.status_int)
def test_complete_rsync_old_file_does_not_exist(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
with patch('swift.common.db_replicator.os',
new=mock.MagicMock(wraps=os)) as mock_os, \
unit.mock_check_drive(isdir=True):
mock_os.path.exists.return_value = False
response = rpc.complete_rsync('drive', '/data/db.db',
['arg1'])
expected_calls = [call('/data/db.db'), call('/drive/tmp/arg1')]
self.assertEqual(expected_calls,
mock_os.path.exists.call_args_list)
self.assertEqual('404 Not Found', response.status)
self.assertEqual(404, response.status_int)
with patch('swift.common.db_replicator.os',
new=mock.MagicMock(wraps=os)) as mock_os, \
unit.mock_check_drive(isdir=True):
mock_os.path.exists.return_value = False
response = rpc.complete_rsync('drive', '/data/db.db',
['arg1', 'arg2'])
expected_calls = [call('/data/arg2'), call('/drive/tmp/arg1')]
self.assertEqual(expected_calls,
mock_os.path.exists.call_args_list)
self.assertEqual('404 Not Found', response.status)
self.assertEqual(404, response.status_int)
def test_complete_rsync_rename(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
def mock_renamer(old, new):
renamer_calls.append((old, new))
self._patch(patch.object, db_replicator, 'renamer', mock_renamer)
renamer_calls = []
with patch('swift.common.db_replicator.os',
new=mock.MagicMock(wraps=os)) as mock_os, \
unit.mock_check_drive(isdir=True):
mock_os.path.exists.side_effect = [False, True]
response = rpc.complete_rsync('drive', '/data/db.db',
['arg1'])
self.assertEqual('204 No Content', response.status)
self.assertEqual(204, response.status_int)
self.assertEqual(('/drive/tmp/arg1', '/data/db.db'), renamer_calls[0])
self.assertFalse(renamer_calls[1:])
renamer_calls = []
with patch('swift.common.db_replicator.os',
new=mock.MagicMock(wraps=os)) as mock_os, \
unit.mock_check_drive(isdir=True):
mock_os.path.exists.side_effect = [False, True]
response = rpc.complete_rsync('drive', '/data/db.db',
['arg1', 'arg2'])
self.assertEqual('204 No Content', response.status)
self.assertEqual(204, response.status_int)
self.assertEqual(('/drive/tmp/arg1', '/data/arg2'), renamer_calls[0])
self.assertFalse(renamer_calls[1:])
def test_replicator_sync_with_broker_replication_missing_table(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False,
logger=self.logger)
broker = FakeBroker()
broker.get_repl_missing_table = True
called = []
def mock_quarantine_db(object_file, server_type):
called.append(True)
self.assertEqual(broker.db_file, object_file)
self.assertEqual(broker.db_type, server_type)
self._patch(patch.object, db_replicator, 'quarantine_db',
mock_quarantine_db)
with unit.mock_check_drive(isdir=True):
response = rpc.sync(broker, ('remote_sync', 'hash_', 'id_',
'created_at', 'put_timestamp',
'delete_timestamp', 'metadata'))
self.assertEqual('404 Not Found', response.status)
self.assertEqual(404, response.status_int)
self.assertEqual(called, [True])
errors = rpc.logger.get_lines_for_level('error')
self.assertEqual(errors,
["Unable to decode remote metadata 'metadata'",
"Quarantining DB %s" % broker])
def test_replicator_sync(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
broker = FakeBroker()
with unit.mock_check_drive(isdir=True):
response = rpc.sync(broker, (
broker.get_sync() + 1, 12345, 'id_',
'created_at', 'put_timestamp', 'delete_timestamp',
'{"meta1": "data1", "meta2": "data2"}'))
self.assertEqual({'meta1': 'data1', 'meta2': 'data2'},
broker.metadata)
self.assertEqual('created_at', broker.created_at)
self.assertEqual('put_timestamp', broker.put_timestamp)
self.assertEqual('delete_timestamp', broker.delete_timestamp)
self.assertEqual('200 OK', response.status)
self.assertEqual(200, response.status_int)
def test_rsync_then_merge(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
with unit.mock_check_drive(isdir=True):
rpc.rsync_then_merge('sda1', '/srv/swift/blah', ('a', 'b'))
def test_merge_items(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
fake_broker = FakeBroker()
args = ('a', 'b')
with unit.mock_check_drive(isdir=True):
rpc.merge_items(fake_broker, args)
self.assertEqual(fake_broker.args, args)
def test_merge_syncs(self):
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
fake_broker = FakeBroker()
args = ('a', 'b')
with unit.mock_check_drive(isdir=True):
rpc.merge_syncs(fake_broker, args)
self.assertEqual(fake_broker.args, (args[0],))
def test_complete_rsync_with_bad_input(self):
drive = '/some/root'
db_file = __file__
args = ['old_file']
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
with unit.mock_check_drive(isdir=True):
resp = rpc.complete_rsync(drive, db_file, args)
self.assertTrue(isinstance(resp, HTTPException))
self.assertEqual(404, resp.status_int)
with unit.mock_check_drive(isdir=True):
resp = rpc.complete_rsync(drive, 'new_db_file', args)
self.assertTrue(isinstance(resp, HTTPException))
self.assertEqual(404, resp.status_int)
def test_complete_rsync(self):
drive = mkdtemp()
args = ['old_file']
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
mount_check=False)
os.mkdir('%s/tmp' % drive)
old_file = '%s/tmp/old_file' % drive
new_file = '%s/new_db_file' % drive
try:
fp = open(old_file, 'w')
fp.write('void')
fp.close()
resp = rpc.complete_rsync(drive, new_file, args)
self.assertEqual(204, resp.status_int)
finally:
rmtree(drive)
@unit.with_tempdir
def test_empty_suffix_and_hash_dirs_get_cleanedup(self, tempdir):
datadir = os.path.join(tempdir, 'containers')
db_path = ('450/afd/7089ab48d955ab0851fc51cc17a34afd/'
'7089ab48d955ab0851fc51cc17a34afd.db')
random_file = ('1060/xyz/1234ab48d955ab0851fc51cc17a34xyz/'
'1234ab48d955ab0851fc51cc17a34xyz.abc')
# trailing "/" indicates empty dir
paths = [
# empty part dir
'240/',
# empty suffix dir
'18/aba/',
# empty hashdir
'1054/27e/d41d8cd98f00b204e9800998ecf8427e/',
# database
db_path,
# non database file
random_file,
]
for path in paths:
path = os.path.join(datadir, path)
os.makedirs(os.path.dirname(path))
if os.path.basename(path):
# our setup requires "directories" to end in "/" (i.e. basename
# is ''); otherwise, create an empty file
open(path, 'w')
# sanity
self.assertEqual({'240', '18', '1054', '1060', '450'},
set(os.listdir(datadir)))
for path in paths:
dirpath = os.path.join(datadir, os.path.dirname(path))
self.assertTrue(os.path.isdir(dirpath))
node_id = 1
results = list(db_replicator.roundrobin_datadirs(
[(datadir, node_id, lambda p: True)]))
expected = [
('450', os.path.join(datadir, db_path), node_id),
]
self.assertEqual(results, expected)
# all the empty leaf dirs are cleaned up
for path in paths:
if os.path.basename(path):
check = self.assertTrue
else:
check = self.assertFalse
dirpath = os.path.join(datadir, os.path.dirname(path))
isdir = os.path.isdir(dirpath)
check(isdir, '%r is%s a directory!' % (
dirpath, '' if isdir else ' not'))
# even though the leaves are cleaned up, it takes a few more passes to finish pruning the parents
self.assertEqual({'18', '1054', '1060', '450'},
set(os.listdir(datadir)))
results = list(db_replicator.roundrobin_datadirs(
[(datadir, node_id, lambda p: True)]))
self.assertEqual(results, expected)
self.assertEqual({'1054', '1060', '450'},
set(os.listdir(datadir)))
results = list(db_replicator.roundrobin_datadirs(
[(datadir, node_id, lambda p: True)]))
self.assertEqual(results, expected)
# non db file in '1060' dir is not deleted and exception is handled
self.assertEqual({'1060', '450'},
set(os.listdir(datadir)))
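# walk two devices' datadirs with mocked os calls; db files should be yielded
# interleaved across devices and empty partition dirs removed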
def test_roundrobin_datadirs(self):
listdir_calls = []
isdir_calls = []
exists_calls = []
shuffle_calls = []
rmdir_calls = []
def _listdir(path):
listdir_calls.append(path)
if not path.startswith('/srv/node/sda/containers') and \
not path.startswith('/srv/node/sdb/containers'):
return []
path = path[len('/srv/node/sdx/containers'):]
if path == '':
return ['123', '456', '789', '9999', "-5", "not-a-partition"]
# 456 will pretend to be a file
# 9999 will be an empty partition with no contents
# -5 and not-a-partition were created by something outside
# Swift
elif path == '/123':
return ['abc', 'def.db'] # def.db will pretend to be a file
elif path == '/123/abc':
# 11111111111111111111111111111abc will pretend to be a file
return ['00000000000000000000000000000abc',
'11111111111111111111111111111abc']
elif path == '/123/abc/00000000000000000000000000000abc':
return ['00000000000000000000000000000abc.db',
# This other.db isn't in the right place, so should be
# ignored later.
'000000000000000000000000000other.db',
'weird1'] # weird1 will pretend to be a dir, if asked
elif path == '/789':
return ['ghi', 'jkl'] # jkl will pretend to be a file
elif path == '/789/ghi':
# 33333333333333333333333333333ghi will pretend to be a file
return ['22222222222222222222222222222ghi',
'33333333333333333333333333333ghi']
elif path == '/789/ghi/22222222222222222222222222222ghi':
return ['22222222222222222222222222222ghi.db',
'weird2'] # weird2 will pretend to be a dir, if asked
elif path == '9999':
return []
elif path == 'not-a-partition':
raise Exception("shouldn't look in not-a-partition")
elif path == '-5':
raise Exception("shouldn't look in -5")
return []
def _isdir(path):
isdir_calls.append(path)
if not path.startswith('/srv/node/sda/containers') and \
not path.startswith('/srv/node/sdb/containers'):
return False
path = path[len('/srv/node/sdx/containers'):]
if path in ('/123', '/123/abc',
'/123/abc/00000000000000000000000000000abc',
'/123/abc/00000000000000000000000000000abc/weird1',
'/789', '/789/ghi',
'/789/ghi/22222222222222222222222222222ghi',
'/789/ghi/22222222222222222222222222222ghi/weird2',
'/9999'):
return True
return False
def _exists(arg):
exists_calls.append(arg)
return True
def _shuffle(arg):
shuffle_calls.append(arg)
def _rmdir(arg):
rmdir_calls.append(arg)
base = 'swift.common.db_replicator.'
with mock.patch(base + 'os.listdir', _listdir), \
mock.patch(base + 'os.path.isdir', _isdir), \
mock.patch(base + 'os.path.exists', _exists), \
mock.patch(base + 'random.shuffle', _shuffle), \
mock.patch(base + 'os.rmdir', _rmdir):
datadirs = [('/srv/node/sda/containers', 1, lambda p: True),
('/srv/node/sdb/containers', 2, lambda p: True)]
results = list(db_replicator.roundrobin_datadirs(datadirs))
# The results show that the .db files are returned, the devices
# interleaved.
self.assertEqual(results, [
('123', '/srv/node/sda/containers/123/abc/'
'00000000000000000000000000000abc/'
'00000000000000000000000000000abc.db', 1),
('123', '/srv/node/sdb/containers/123/abc/'
'00000000000000000000000000000abc/'
'00000000000000000000000000000abc.db', 2),
('789', '/srv/node/sda/containers/789/ghi/'
'22222222222222222222222222222ghi/'
'22222222222222222222222222222ghi.db', 1),
('789', '/srv/node/sdb/containers/789/ghi/'
'22222222222222222222222222222ghi/'
'22222222222222222222222222222ghi.db', 2)])
# The listdir calls show that we only listdir the dirs
self.assertEqual(listdir_calls, [
'/srv/node/sda/containers',
'/srv/node/sda/containers/123',
'/srv/node/sda/containers/123/abc',
'/srv/node/sdb/containers',
'/srv/node/sdb/containers/123',
'/srv/node/sdb/containers/123/abc',
'/srv/node/sda/containers/789',
'/srv/node/sda/containers/789/ghi',
'/srv/node/sdb/containers/789',
'/srv/node/sdb/containers/789/ghi',
'/srv/node/sda/containers/9999',
'/srv/node/sdb/containers/9999'])
# The isdir calls show that we did ask about the things pretending
# to be files at various levels.
self.assertEqual(isdir_calls, [
'/srv/node/sda/containers/123',
'/srv/node/sda/containers/123/abc',
('/srv/node/sda/containers/123/abc/'
'00000000000000000000000000000abc'),
'/srv/node/sdb/containers/123',
'/srv/node/sdb/containers/123/abc',
('/srv/node/sdb/containers/123/abc/'
'00000000000000000000000000000abc'),
('/srv/node/sda/containers/123/abc/'
'11111111111111111111111111111abc'),
'/srv/node/sda/containers/123/def.db',
'/srv/node/sda/containers/456',
'/srv/node/sda/containers/789',
'/srv/node/sda/containers/789/ghi',
('/srv/node/sda/containers/789/ghi/'
'22222222222222222222222222222ghi'),
('/srv/node/sdb/containers/123/abc/'
'11111111111111111111111111111abc'),
'/srv/node/sdb/containers/123/def.db',
'/srv/node/sdb/containers/456',
'/srv/node/sdb/containers/789',
'/srv/node/sdb/containers/789/ghi',
('/srv/node/sdb/containers/789/ghi/'
'22222222222222222222222222222ghi'),
('/srv/node/sda/containers/789/ghi/'
'33333333333333333333333333333ghi'),
'/srv/node/sda/containers/789/jkl',
'/srv/node/sda/containers/9999',
('/srv/node/sdb/containers/789/ghi/'
'33333333333333333333333333333ghi'),
'/srv/node/sdb/containers/789/jkl',
'/srv/node/sdb/containers/9999'])
# The exists calls are the .db files we looked for as we walked the
# structure.
self.assertEqual(exists_calls, [
('/srv/node/sda/containers/123/abc/'
'00000000000000000000000000000abc/'
'00000000000000000000000000000abc.db'),
('/srv/node/sdb/containers/123/abc/'
'00000000000000000000000000000abc/'
'00000000000000000000000000000abc.db'),
('/srv/node/sda/containers/789/ghi/'
'22222222222222222222222222222ghi/'
'22222222222222222222222222222ghi.db'),
('/srv/node/sdb/containers/789/ghi/'
'22222222222222222222222222222ghi/'
'22222222222222222222222222222ghi.db')])
# Shows that we called shuffle twice, once for each device.
self.assertEqual(
shuffle_calls, [['123', '456', '789', '9999'],
['123', '456', '789', '9999']])
# Shows that we removed the two empty partition directories.
self.assertEqual(
rmdir_calls, ['/srv/node/sda/containers/9999',
'/srv/node/sdb/containers/9999'])
@mock.patch("swift.common.db_replicator.ReplConnection", mock.Mock())
def test_http_connect(self):
node = "node"
partition = "partition"
db_file = __file__
replicator = ConcreteReplicator({})
replicator._http_connect(node, partition, db_file)
expected_hsh = os.path.basename(db_file).split('.', 1)[0]
expected_hsh = expected_hsh.split('_', 1)[0]
db_replicator.ReplConnection.assert_has_calls([
mock.call(node, partition, expected_hsh, replicator.logger)])
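# run_once() with handoffs_only and device/partition overrides, exercised
# against a small on-disk tree of fake container DBs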
class TestHandoffsOnly(unittest.TestCase):
class FakeRing3Nodes(object):
_replicas = 3
# Three nodes, two disks each
devs = [
dict(id=0, region=1, zone=1,
meta='', weight=500.0, ip='10.0.0.1', port=6201,
replication_ip='10.0.0.1', replication_port=6201,
device='sdp'),
dict(id=1, region=1, zone=1,
meta='', weight=500.0, ip='10.0.0.1', port=6201,
replication_ip='10.0.0.1', replication_port=6201,
device='sdq'),
dict(id=2, region=1, zone=1,
meta='', weight=500.0, ip='10.0.0.2', port=6201,
replication_ip='10.0.0.2', replication_port=6201,
device='sdp'),
dict(id=3, region=1, zone=1,
meta='', weight=500.0, ip='10.0.0.2', port=6201,
replication_ip='10.0.0.2', replication_port=6201,
device='sdq'),
dict(id=4, region=1, zone=1,
meta='', weight=500.0, ip='10.0.0.3', port=6201,
replication_ip='10.0.0.3', replication_port=6201,
device='sdp'),
dict(id=5, region=1, zone=1,
meta='', weight=500.0, ip='10.0.0.3', port=6201,
replication_ip='10.0.0.3', replication_port=6201,
device='sdq'),
]
def __init__(self, *a, **kw):
pass
def get_part(self, account, container=None, obj=None):
return 0
def get_part_nodes(self, part):
nodes = []
for offset in range(self._replicas):
i = (part + offset) % len(self.devs)
nodes.append(self.devs[i])
return nodes
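# handoffs: every device beyond the primaries, in ring order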
def get_more_nodes(self, part):
for offset in range(self._replicas, len(self.devs)):
i = (part + offset) % len(self.devs)
yield self.devs[i]
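# create an empty <hash>.db under <root>/<disk>/containers/<partition>/<suffix>/<hash>/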
def _make_fake_db(self, disk, partition, db_hash):
directories = [
os.path.join(self.root, disk),
os.path.join(self.root, disk, 'containers'),
os.path.join(self.root, disk, 'containers', str(partition)),
os.path.join(self.root, disk, 'containers', str(partition),
db_hash[-3:]),
os.path.join(self.root, disk, 'containers', str(partition),
db_hash[-3:], db_hash)]
for d in directories:
try:
os.mkdir(d)
except OSError as err:
if err.errno != errno.EEXIST:
raise
file_path = os.path.join(directories[-1], db_hash + ".db")
with open(file_path, 'w'):
pass
def setUp(self):
self.root = mkdtemp()
self.logger = debug_logger()
# object disks; they're just here to make sure they don't trip us up
os.mkdir(os.path.join(self.root, 'sdc'))
os.mkdir(os.path.join(self.root, 'sdc', 'objects'))
os.mkdir(os.path.join(self.root, 'sdd'))
os.mkdir(os.path.join(self.root, 'sdd', 'objects'))
# part 0 belongs on sdp
self._make_fake_db('sdp', 0, '010101013cf2b7979af9eaa71cb67220')
# part 1 does not belong on sdp
self._make_fake_db('sdp', 1, 'abababab2b5368158355e799323b498d')
# part 1 belongs on sdq
self._make_fake_db('sdq', 1, '02020202e30f696a3cfa63d434a3c94e')
# part 2 does not belong on sdq
self._make_fake_db('sdq', 2, 'bcbcbcbc15d3835053d568c57e2c83b5')
def tearDown(self):
rmtree(self.root, ignore_errors=True)
def test_scary_warnings(self):
replicator = ConcreteReplicator({
'handoffs_only': 'yes',
'devices': self.root,
'bind_port': 6201,
'mount_check': 'no',
}, logger=self.logger)
with patch.object(db_replicator, 'whataremyips',
return_value=['10.0.0.1']), \
patch.object(replicator, '_replicate_object'), \
patch.object(replicator, 'ring', self.FakeRing3Nodes()):
replicator.run_once()
self.assertEqual(
self.logger.get_lines_for_level('warning'),
[('Starting replication pass with handoffs_only and/or '
'handoffs_delete enabled. These '
'modes are not intended for normal operation; use '
'these options with care.'),
('Finished replication pass with handoffs_only and/or '
'handoffs_delete enabled. If these are no longer required, '
'disable them.')])
def test_skips_primary_partitions(self):
replicator = ConcreteReplicator({
'handoffs_only': 'yes',
'devices': self.root,
'bind_port': 6201,
'mount_check': 'no',
})
with patch.object(db_replicator, 'whataremyips',
return_value=['10.0.0.1']), \
patch.object(replicator, '_replicate_object') as mock_repl, \
patch.object(replicator, 'ring', self.FakeRing3Nodes()):
replicator.run_once()
self.assertEqual(sorted(mock_repl.mock_calls), [
mock.call('1', os.path.join(
self.root, 'sdp', 'containers', '1', '98d',
'abababab2b5368158355e799323b498d',
'abababab2b5368158355e799323b498d.db'), 0),
mock.call('2', os.path.join(
self.root, 'sdq', 'containers', '2', '3b5',
'bcbcbcbc15d3835053d568c57e2c83b5',
'bcbcbcbc15d3835053d568c57e2c83b5.db'), 1)])
def test_override_partitions(self):
replicator = ConcreteReplicator({
'devices': self.root,
'bind_port': 6201,
'mount_check': 'no',
})
with patch.object(db_replicator, 'whataremyips',
return_value=['10.0.0.1']), \
patch.object(replicator, '_replicate_object') as mock_repl, \
patch.object(replicator, 'ring', self.FakeRing3Nodes()):
replicator.run_once(partitions="0,2")
self.assertEqual(sorted(mock_repl.mock_calls), [
mock.call('0', os.path.join(
self.root, 'sdp', 'containers', '0', '220',
'010101013cf2b7979af9eaa71cb67220',
'010101013cf2b7979af9eaa71cb67220.db'), 0),
mock.call('2', os.path.join(
self.root, 'sdq', 'containers', '2', '3b5',
'bcbcbcbc15d3835053d568c57e2c83b5',
'bcbcbcbc15d3835053d568c57e2c83b5.db'), 1)])
def test_override_devices(self):
replicator = ConcreteReplicator({
'devices': self.root,
'bind_port': 6201,
'mount_check': 'no',
})
with patch.object(db_replicator, 'whataremyips',
return_value=['10.0.0.1']), \
patch.object(replicator, '_replicate_object') as mock_repl, \
patch.object(replicator, 'ring', self.FakeRing3Nodes()):
replicator.run_once(devices="sdp")
self.assertEqual(sorted(mock_repl.mock_calls), [
mock.call('0', os.path.join(
self.root, 'sdp', 'containers', '0', '220',
'010101013cf2b7979af9eaa71cb67220',
'010101013cf2b7979af9eaa71cb67220.db'), 0),
mock.call('1', os.path.join(
self.root, 'sdp', 'containers', '1', '98d',
'abababab2b5368158355e799323b498d',
'abababab2b5368158355e799323b498d.db'), 0)])
def test_override_devices_and_partitions(self):
replicator = ConcreteReplicator({
'devices': self.root,
'bind_port': 6201,
'mount_check': 'no',
})
with patch.object(db_replicator, 'whataremyips',
return_value=['10.0.0.1']), \
patch.object(replicator, '_replicate_object') as mock_repl, \
patch.object(replicator, 'ring', self.FakeRing3Nodes()):
replicator.run_once(partitions="0,2", devices="sdp")
self.assertEqual(sorted(mock_repl.mock_calls), [
mock.call('0', os.path.join(
self.root, 'sdp', 'containers', '0', '220',
'010101013cf2b7979af9eaa71cb67220',
'010101013cf2b7979af9eaa71cb67220.db'), 0)])
class TestReplToNode(unittest.TestCase):
def setUp(self):
db_replicator.ring = FakeRing()
self.delete_db_calls = []
self.broker = FakeBroker()
self.replicator = ConcreteReplicator({'per_diff': 10})
self.fake_node = {'ip': '127.0.0.1', 'device': 'sda1', 'port': 1000}
self.fake_info = {'id': 'a', 'point': -1, 'max_row': 20, 'hash': 'b',
'created_at': 100, 'put_timestamp': 0,
'delete_timestamp': 0, 'count': 0,
'metadata': json.dumps({
'Test': ('Value', normalize_timestamp(1))})}
self.replicator.logger = mock.Mock()
self.replicator._rsync_db = mock.Mock(return_value=True)
self.replicator._usync_db = mock.Mock(return_value=True)
self.http = ReplHttp('{"id": 3, "point": -1}')
self.replicator._http_connect = lambda *args: self.http
def test_repl_to_node_usync_success(self):
rinfo = {"id": 3, "point": -1, "max_row": 10, "hash": "c"}
self.http = ReplHttp(json.dumps(rinfo))
local_sync = self.broker.get_sync()
self.assertEqual(self.replicator._repl_to_node(
self.fake_node, self.broker, '0', self.fake_info), True)
self.replicator._usync_db.assert_has_calls([
mock.call(max(rinfo['point'], local_sync), self.broker,
self.http, rinfo['id'], self.fake_info['id'])
])
def test_repl_to_node_rsync_success(self):
rinfo = {"id": 3, "point": -1, "max_row": 9, "hash": "c"}
self.http = ReplHttp(json.dumps(rinfo))
self.broker.get_sync()
self.assertEqual(self.replicator._repl_to_node(
self.fake_node, self.broker, '0', self.fake_info), True)
self.replicator.logger.increment.assert_has_calls([
mock.call.increment('remote_merges')
])
self.replicator._rsync_db.assert_has_calls([
mock.call(self.broker, self.fake_node, self.http,
self.fake_info['id'],
replicate_method='rsync_then_merge',
replicate_timeout=(self.fake_info['count'] / 2000),
different_region=False)
])
def test_repl_to_node_already_in_sync(self):
rinfo = {"id": 3, "point": -1, "max_row": 20, "hash": "b"}
self.http = ReplHttp(json.dumps(rinfo))
self.broker.get_sync()
self.assertEqual(self.replicator._repl_to_node(
self.fake_node, self.broker, '0', self.fake_info), True)
self.assertEqual(self.replicator._rsync_db.call_count, 0)
self.assertEqual(self.replicator._usync_db.call_count, 0)
def test_repl_to_node_metadata_update(self):
now = Timestamp(time.time()).internal
rmetadata = {"X-Container-Sysmeta-Test": ("XYZ", now)}
rinfo = {"id": 3, "point": -1, "max_row": 20, "hash": "b",
"metadata": json.dumps(rmetadata)}
self.http = ReplHttp(json.dumps(rinfo))
self.broker.get_sync()
self.assertEqual(self.replicator._repl_to_node(
self.fake_node, self.broker, '0', self.fake_info), True)
metadata = self.broker.metadata
self.assertIn("X-Container-Sysmeta-Test", metadata)
self.assertEqual("XYZ", metadata["X-Container-Sysmeta-Test"][0])
self.assertEqual(now, metadata["X-Container-Sysmeta-Test"][1])
def test_repl_to_node_not_found(self):
self.http = ReplHttp('{"id": 3, "point": -1}', set_status=404)
self.assertEqual(self.replicator._repl_to_node(
self.fake_node, self.broker, '0', self.fake_info, False), True)
self.replicator.logger.increment.assert_has_calls([
mock.call.increment('rsyncs')
])
self.replicator._rsync_db.assert_has_calls([
mock.call(self.broker, self.fake_node, self.http,
self.fake_info['id'], different_region=False)
])
def test_repl_to_node_drive_not_mounted(self):
self.http = ReplHttp('{"id": 3, "point": -1}', set_status=507)
self.assertRaises(DriveNotMounted, self.replicator._repl_to_node,
self.fake_node, FakeBroker(), '0', self.fake_info)
def test_repl_to_node_300_status(self):
self.http = ReplHttp('{"id": 3, "point": -1}', set_status=300)
self.assertFalse(self.replicator._repl_to_node(
self.fake_node, FakeBroker(), '0', self.fake_info))
def test_repl_to_node_not_response(self):
self.http = mock.Mock(replicate=mock.Mock(return_value=None))
self.assertEqual(self.replicator._repl_to_node(
self.fake_node, FakeBroker(), '0', self.fake_info), False)
def test_repl_to_node_small_container_always_usync(self):
# Tests that a small container that is > 50% out of sync will
# still use usync.
rinfo = {"id": 3, "point": -1, "hash": "c"}
# Turn per_diff back to swift's default.
self.replicator.per_diff = 1000
for r, l in ((5, 20), (40, 100), (450, 1000), (550, 1500)):
rinfo['max_row'] = r
self.fake_info['max_row'] = l
self.replicator._usync_db = mock.Mock(return_value=True)
self.http = ReplHttp(json.dumps(rinfo))
local_sync = self.broker.get_sync()
self.assertEqual(self.replicator._repl_to_node(
self.fake_node, self.broker, '0', self.fake_info), True)
self.replicator._usync_db.assert_has_calls([
mock.call(max(rinfo['point'], local_sync), self.broker,
self.http, rinfo['id'], self.fake_info['id'])
])
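    # A rough, illustrative sketch (not part of the tests above) of the
    # choice _repl_to_node is being exercised for: it either replays only
    # the missing rows (usync) or ships the whole db and merges it
    # (rsync_then_merge). The exact thresholds live in
    # swift.common.db_replicator; the condition below is only an assumed
    # paraphrase of the idea:
    #
    #   if (remote_max_row / local_max_row < 0.5
    #           and local_max_row - remote_max_row > per_diff):
    #       rsync_then_merge(...)   # badly out of sync: ship the whole db
    #   else:
    #       usync(...)              # otherwise replay the missing rows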
class ExampleReplicator(db_replicator.Replicator):
# We need to have a valid server_type
server_type = 'object'
brokerclass = ExampleBroker
datadir = 'fake'
default_port = 1000
class TestReplicatorSync(unittest.TestCase):
# override in subclass
backend = ExampleReplicator.brokerclass
datadir = ExampleReplicator.datadir
replicator_daemon = ExampleReplicator
replicator_rpc = db_replicator.ReplicatorRpc
def setUp(self):
self.root = mkdtemp()
self.rpc = self.replicator_rpc(
self.root, self.datadir, self.backend, mount_check=False,
logger=debug_logger())
FakeReplConnection = attach_fake_replication_rpc(self.rpc)
self._orig_ReplConnection = db_replicator.ReplConnection
db_replicator.ReplConnection = FakeReplConnection
self._orig_Ring = db_replicator.ring.Ring
self._ring = unit.FakeRing()
db_replicator.ring.Ring = lambda *args, **kwargs: self._get_ring()
self.logger = debug_logger()
def tearDown(self):
db_replicator.ReplConnection = self._orig_ReplConnection
db_replicator.ring.Ring = self._orig_Ring
rmtree(self.root)
def _get_ring(self):
return self._ring
def _get_broker(self, account, container=None, node_index=0):
hash_ = hash_path(account, container)
part, nodes = self._ring.get_nodes(account, container)
drive = nodes[node_index]['device']
db_path = os.path.join(self.root, drive,
storage_directory(self.datadir, part, hash_),
hash_ + '.db')
return self.backend(db_path, account=account, container=container)
def _get_broker_part_node(self, broker):
part, nodes = self._ring.get_nodes(broker.account, broker.container)
storage_dir = broker.db_file[len(self.root):].lstrip(os.path.sep)
broker_device = storage_dir.split(os.path.sep, 1)[0]
for node in nodes:
if node['device'] == broker_device:
return part, node
def _get_daemon(self, node, conf_updates):
conf = {
'devices': self.root,
'recon_cache_path': self.root,
'mount_check': 'false',
'bind_port': node['replication_port'],
}
if conf_updates:
conf.update(conf_updates)
return self.replicator_daemon(conf, logger=self.logger)
def _install_fake_rsync_file(self, daemon, captured_calls=None):
def _rsync_file(db_file, remote_file, **kwargs):
if captured_calls is not None:
captured_calls.append((db_file, remote_file, kwargs))
remote_server, remote_path = remote_file.split('/', 1)
dest_path = os.path.join(self.root, remote_path)
copy(db_file, dest_path)
return True
daemon._rsync_file = _rsync_file
def _run_once(self, node, conf_updates=None, daemon=None):
daemon = daemon or self._get_daemon(node, conf_updates)
self._install_fake_rsync_file(daemon)
with mock.patch('swift.common.db_replicator.whataremyips',
new=lambda *a, **kw: [node['replication_ip']]), \
unit.mock_check_drive(isdir=not daemon.mount_check,
ismount=daemon.mount_check):
daemon.run_once()
return daemon
def test_local_ids(self):
for drive in ('sda', 'sdb', 'sdd'):
os.makedirs(os.path.join(self.root, drive, self.datadir))
for node in self._ring.devs:
daemon = self._run_once(node)
if node['device'] == 'sdc':
self.assertEqual(daemon._local_device_ids, {})
else:
self.assertEqual(daemon._local_device_ids,
{node['id']: node})
def test_clean_up_after_deleted_brokers(self):
broker = self._get_broker('a', 'c', node_index=0)
part, node = self._get_broker_part_node(broker)
part = str(part)
daemon = self._run_once(node)
# create a super old broker and delete it!
forever_ago = time.time() - daemon.reclaim_age
put_timestamp = normalize_timestamp(forever_ago - 2)
delete_timestamp = normalize_timestamp(forever_ago - 1)
broker.initialize(put_timestamp)
broker.delete_db(delete_timestamp)
# if we have a container broker make sure it's reported
if hasattr(broker, 'reported'):
info = broker.get_info()
broker.reported(info['put_timestamp'],
info['delete_timestamp'],
info['object_count'],
info['bytes_used'])
info = broker.get_replication_info()
self.assertTrue(daemon.report_up_to_date(info))
# we have a part dir
part_root = os.path.join(self.root, node['device'], self.datadir)
parts = os.listdir(part_root)
self.assertEqual([part], parts)
# with a single suffix
suff = os.listdir(os.path.join(part_root, part))
self.assertEqual(1, len(suff))
# running replicator will remove the deleted db
daemon = self._run_once(node, daemon=daemon)
self.assertEqual(1, daemon.stats['remove'])
# which also takes out the empty part dir
parts = os.listdir(part_root)
self.assertEqual(0, len(parts))
def test_rsync_then_merge(self):
# setup current db (and broker)
broker = self._get_broker('a', 'c', node_index=0)
part, node = self._get_broker_part_node(broker)
part = str(part)
put_timestamp = normalize_timestamp(time.time())
broker.initialize(put_timestamp)
put_metadata = {'example-meta': ['bah', put_timestamp]}
broker.update_metadata(put_metadata)
# sanity (re-open, and the db keeps the metadata)
broker = self._get_broker('a', 'c', node_index=0)
self.assertEqual(put_metadata, broker.metadata)
# create rsynced db in tmp dir
obj_hash = hash_path('a', 'c')
rsynced_db_broker = self.backend(
os.path.join(self.root, node['device'], 'tmp', obj_hash + '.db'),
account='a', container='b')
rsynced_db_broker.initialize(put_timestamp)
        # do rsync_then_merge
rpc = db_replicator.ReplicatorRpc(
self.root, self.datadir, self.backend, False)
response = rpc.dispatch((node['device'], part, obj_hash),
['rsync_then_merge', obj_hash + '.db', 'arg2'])
# sanity
self.assertEqual('204 No Content', response.status)
self.assertEqual(204, response.status_int)
# re-open the db
broker = self._get_broker('a', 'c', node_index=0)
        # keep the metadata in the existing db
self.assertEqual(put_metadata, broker.metadata)
def test_replicator_sync(self):
# setup current db (and broker)
broker = self._get_broker('a', 'c', node_index=0)
part, node = self._get_broker_part_node(broker)
part = str(part)
put_timestamp = normalize_timestamp(time.time())
broker.initialize(put_timestamp)
put_metadata = {'example-meta': ['bah', put_timestamp]}
sync_local_metadata = {
"meta1": ["data1", put_timestamp],
"meta2": ["data2", put_timestamp]}
broker.update_metadata(put_metadata)
# sanity (re-open, and the db keeps the metadata)
broker = self._get_broker('a', 'c', node_index=0)
self.assertEqual(put_metadata, broker.metadata)
        # do sync
rpc = db_replicator.ReplicatorRpc(
self.root, self.datadir, ExampleBroker, False)
response = rpc.sync(
broker, (broker.get_sync('id_') + 1, 12345, 'id_',
put_timestamp, put_timestamp, '0',
json.dumps(sync_local_metadata)))
# sanity
self.assertEqual('200 OK', response.status)
self.assertEqual(200, response.status_int)
# re-open the db
broker = self._get_broker('a', 'c', node_index=0)
        # keep both the existing db's metadata and the local metadata
expected = put_metadata.copy()
expected.update(sync_local_metadata)
self.assertEqual(expected, broker.metadata)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/test_db_replicator.py |
# Copyright (c) 2021 NVIDIA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from time import time
from swift.common.error_limiter import ErrorLimiter
from test.unit import FakeRing
class TestErrorLimiter(unittest.TestCase):
def setUp(self):
self.ring = FakeRing()
def test_init_config(self):
config = {'suppression_interval': 100.9,
'suppression_limit': 5}
limiter = ErrorLimiter(**config)
self.assertEqual(limiter.suppression_interval, 100.9)
self.assertEqual(limiter.suppression_limit, 5)
config = {'suppression_interval': '100.9',
'suppression_limit': '5'}
limiter = ErrorLimiter(**config)
self.assertEqual(limiter.suppression_interval, 100.9)
self.assertEqual(limiter.suppression_limit, 5)
def test_init_bad_config(self):
with self.assertRaises(ValueError):
ErrorLimiter(suppression_interval='bad',
suppression_limit=1)
with self.assertRaises(TypeError):
ErrorLimiter(suppression_interval=None,
suppression_limit=1)
with self.assertRaises(ValueError):
ErrorLimiter(suppression_interval=0,
suppression_limit='bad')
with self.assertRaises(TypeError):
ErrorLimiter(suppression_interval=0,
suppression_limit=None)
def test_is_limited(self):
node = self.ring.devs[-1]
limiter = ErrorLimiter(suppression_interval=60, suppression_limit=10)
now = time()
with mock.patch('swift.common.error_limiter.time', return_value=now):
self.assertFalse(limiter.is_limited(node))
limiter.limit(node)
self.assertTrue(limiter.is_limited(node))
node_key = limiter.node_key(node)
self.assertEqual(limiter.stats.get(node_key),
{'errors': limiter.suppression_limit + 1,
'last_error': now})
def test_increment(self):
node = self.ring.devs[-1]
limiter = ErrorLimiter(suppression_interval=60, suppression_limit=10)
node_key = limiter.node_key(node)
for i in range(limiter.suppression_limit):
self.assertFalse(limiter.increment(node))
self.assertEqual(i + 1, limiter.stats.get(node_key)['errors'])
self.assertFalse(limiter.is_limited(node))
# A few more to make sure it is > suppression_limit
for i in range(1, 4):
self.assertTrue(limiter.increment(node))
self.assertEqual(limiter.suppression_limit + i,
limiter.stats.get(node_key)['errors'])
self.assertTrue(limiter.is_limited(node))
        # Simulate that time with no errors has gone by.
last_time = limiter.stats.get(node_key)['last_error']
now = last_time + limiter.suppression_interval + 1
with mock.patch('swift.common.error_limiter.time',
return_value=now):
self.assertFalse(limiter.is_limited(node))
self.assertFalse(limiter.stats.get(node_key))
def test_node_key(self):
limiter = ErrorLimiter(suppression_interval=60, suppression_limit=10)
node = self.ring.devs[0]
expected = '%s:%s/%s' % (node['ip'], node['port'], node['device'])
self.assertEqual(expected, limiter.node_key(node))
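# Illustrative only: a hedged sketch (not asserted by these tests) of how a
# caller such as the proxy might wrap a backend request with an ErrorLimiter.
# Only the is_limited()/increment() calls are taken from the tests above;
# make_request() and the surrounding flow are assumptions.
#
#   limiter = ErrorLimiter(suppression_interval=60, suppression_limit=10)
#   if not limiter.is_limited(node):
#       try:
#           make_request(node)           # hypothetical helper
#       except Exception:
#           limiter.increment(node)      # enough of these suppresses the node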
| swift-master | test/unit/common/test_error_limiter.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.db"""
import contextlib
import os
import sys
import unittest
from tempfile import mkdtemp
from shutil import rmtree, copy
from uuid import uuid4
import six.moves.cPickle as pickle
import base64
import json
import sqlite3
import itertools
import time
import random
from mock import patch, MagicMock
from eventlet.timeout import Timeout
from six.moves import range
import six
import swift.common.db
from swift.common.constraints import \
MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE
from swift.common.db import chexor, dict_factory, get_db_connection, \
DatabaseBroker, DatabaseConnectionError, DatabaseAlreadyExists, \
GreenDBConnection, PICKLE_PROTOCOL, zero_like, TombstoneReclaimer
from swift.common.utils import normalize_timestamp, mkdirs, Timestamp
from swift.common.exceptions import LockTimeout
from swift.common.swob import HTTPException
from test.unit import make_timestamp_iter, generate_db_path
class TestHelperFunctions(unittest.TestCase):
def test_zero_like(self):
expectations = {
# value => expected
None: True,
True: False,
'': True,
'asdf': False,
0: True,
1: False,
'0': True,
'1': False,
}
errors = []
for value, expected in expectations.items():
rv = zero_like(value)
if rv != expected:
errors.append('zero_like(%r) => %r expected %r' % (
value, rv, expected))
if errors:
self.fail('Some unexpected return values:\n' + '\n'.join(errors))
class TestDatabaseConnectionError(unittest.TestCase):
def test_str(self):
err = \
DatabaseConnectionError(':memory:', 'No valid database connection')
self.assertIn(':memory:', str(err))
self.assertIn('No valid database connection', str(err))
err = DatabaseConnectionError(':memory:',
'No valid database connection',
timeout=1357)
self.assertIn(':memory:', str(err))
self.assertIn('No valid database connection', str(err))
self.assertIn('1357', str(err))
class TestDictFactory(unittest.TestCase):
def test_normal_case(self):
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE test (one TEXT, two INTEGER)')
conn.execute('INSERT INTO test (one, two) VALUES ("abc", 123)')
conn.execute('INSERT INTO test (one, two) VALUES ("def", 456)')
conn.commit()
curs = conn.execute('SELECT one, two FROM test')
self.assertEqual(dict_factory(curs, next(curs)),
{'one': 'abc', 'two': 123})
self.assertEqual(dict_factory(curs, next(curs)),
{'one': 'def', 'two': 456})
class TestChexor(unittest.TestCase):
def test_normal_case(self):
self.assertEqual(
chexor('d41d8cd98f00b204e9800998ecf8427e',
'new name', normalize_timestamp(1)),
'4f2ea31ac14d4273fe32ba08062b21de')
def test_invalid_old_hash(self):
self.assertRaises(ValueError, chexor, 'oldhash', 'name',
normalize_timestamp(1))
def test_no_name(self):
self.assertRaises(Exception, chexor,
'd41d8cd98f00b204e9800998ecf8427e', None,
normalize_timestamp(1))
def test_chexor(self):
ts = (normalize_timestamp(ts) for ts in
itertools.count(int(time.time())))
objects = [
('frank', next(ts)),
('bob', next(ts)),
('tom', next(ts)),
('frank', next(ts)),
('tom', next(ts)),
('bob', next(ts)),
]
hash_ = '0'
random.shuffle(objects)
for obj in objects:
hash_ = chexor(hash_, *obj)
other_hash = '0'
random.shuffle(objects)
for obj in objects:
other_hash = chexor(other_hash, *obj)
self.assertEqual(hash_, other_hash)
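        # For context: chexor folds each (name, timestamp) pair into the
        # running value by XOR-ing in an MD5 digest, which is why the
        # accumulated hash above does not depend on merge order. A hedged
        # sketch of the idea (the real helper lives in swift.common.db):
        #
        #   def chexor_sketch(old_hex, name, timestamp):
        #       new = md5(('%s-%s' % (name, timestamp)).encode('utf-8'))
        #       return '%032x' % (int(old_hex, 16) ^ int(new.hexdigest(), 16))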
class TestGreenDBConnection(unittest.TestCase):
def test_execute_when_locked(self):
        # This test is dependent on the code under test calling execute
        # as sqlite3.Cursor.execute in a subclass.
class InterceptCursor(sqlite3.Cursor):
pass
db_error = sqlite3.OperationalError('database is locked')
InterceptCursor.execute = MagicMock(side_effect=db_error)
with patch('sqlite3.Cursor', new=InterceptCursor):
conn = sqlite3.connect(':memory:', check_same_thread=False,
factory=GreenDBConnection, timeout=0.1)
self.assertRaises(Timeout, conn.execute, 'select 1')
self.assertTrue(InterceptCursor.execute.called)
self.assertEqual(InterceptCursor.execute.call_args_list,
list((InterceptCursor.execute.call_args,) *
InterceptCursor.execute.call_count))
    def test_commit_when_locked(self):
        # This test is dependent on the code under test calling commit
        # as sqlite3.Connection.commit in a subclass.
class InterceptConnection(sqlite3.Connection):
pass
db_error = sqlite3.OperationalError('database is locked')
InterceptConnection.commit = MagicMock(side_effect=db_error)
with patch('sqlite3.Connection', new=InterceptConnection):
conn = sqlite3.connect(':memory:', check_same_thread=False,
factory=GreenDBConnection, timeout=0.1)
self.assertRaises(Timeout, conn.commit)
self.assertTrue(InterceptConnection.commit.called)
self.assertEqual(InterceptConnection.commit.call_args_list,
list((InterceptConnection.commit.call_args,) *
InterceptConnection.commit.call_count))
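    # Context for the two tests above: GreenDBConnection and its cursor are
    # expected to retry when sqlite3 reports "database is locked", yielding
    # between attempts, until the broker timeout expires and an eventlet
    # Timeout escapes, which is why the mocks record many identical calls
    # before the Timeout is raised.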
class TestDbBase(unittest.TestCase):
server_type = 'container'
testdir = None
def setUp(self):
self.testdir = mkdtemp()
self.db_path = self.get_db_path()
def tearDown(self):
rmtree(self.testdir, ignore_errors=True)
def get_db_path(self):
return generate_db_path(self.testdir, self.server_type)
class TestGetDBConnection(TestDbBase):
def setUp(self):
super(TestGetDBConnection, self).setUp()
self.db_path = self.init_db_path()
def init_db_path(self):
        # create and initialize an example db to get connections to
db_path = self.get_db_path()
broker = ExampleBroker(db_path, account='a')
broker.initialize(Timestamp.now().internal, 0)
return db_path
def test_normal_case(self):
conn = get_db_connection(self.db_path)
self.assertTrue(hasattr(conn, 'execute'))
def test_invalid_path(self):
self.assertRaises(DatabaseConnectionError, get_db_connection,
'invalid database path / name')
def test_locked_db(self):
        # This test is dependent on the code under test calling execute
        # as sqlite3.Cursor.execute in a subclass.
class InterceptCursor(sqlite3.Cursor):
pass
db_error = sqlite3.OperationalError('database is locked')
mock_db_cmd = MagicMock(side_effect=db_error)
InterceptCursor.execute = mock_db_cmd
with patch('sqlite3.Cursor', new=InterceptCursor):
self.assertRaises(Timeout, get_db_connection,
self.db_path, timeout=0.1)
self.assertTrue(mock_db_cmd.called)
self.assertEqual(mock_db_cmd.call_args_list,
list((mock_db_cmd.call_args,) *
mock_db_cmd.call_count))
class ExampleBroker(DatabaseBroker):
"""
Concrete enough implementation of a DatabaseBroker.
"""
db_type = 'test'
db_contains_type = 'test'
db_reclaim_timestamp = 'created_at'
def _initialize(self, conn, put_timestamp, **kwargs):
if not self.account:
raise ValueError(
'Attempting to create a new database with no account set')
conn.executescript('''
CREATE TABLE test_stat (
account TEXT,
test_count INTEGER DEFAULT 0,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0',
metadata TEXT DEFAULT ''
);
CREATE TABLE test (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
created_at TEXT,
deleted INTEGER DEFAULT 0
);
CREATE TRIGGER test_insert AFTER INSERT ON test
BEGIN
UPDATE test_stat
SET test_count = test_count + (1 - new.deleted);
END;
CREATE TRIGGER test_delete AFTER DELETE ON test
BEGIN
UPDATE test_stat
SET test_count = test_count - (1 - old.deleted);
END;
''')
conn.execute("""
INSERT INTO test_stat (
account, created_at, id, put_timestamp, status_changed_at, status)
VALUES (?, ?, ?, ?, ?, ?);
""", (self.account, Timestamp.now().internal, str(uuid4()),
put_timestamp, put_timestamp, ''))
def merge_items(self, item_list):
with self.get() as conn:
for rec in item_list:
conn.execute(
'DELETE FROM test WHERE name = ? and created_at < ?', (
rec['name'], rec['created_at']))
if not conn.execute(
'SELECT 1 FROM test WHERE name = ?',
(rec['name'],)).fetchall():
conn.execute('''
INSERT INTO test (name, created_at, deleted)
VALUES (?, ?, ?)''', (
rec['name'], rec['created_at'], rec['deleted']))
conn.commit()
def _commit_puts_load(self, item_list, entry):
(name, timestamp, deleted) = entry
item_list.append({
'name': name,
'created_at': timestamp,
'deleted': deleted,
})
def _load_item(self, name, timestamp, deleted):
if self.db_file == ':memory:':
record = {
'name': name,
'created_at': timestamp,
'deleted': deleted,
}
self.merge_items([record])
return
with open(self.pending_file, 'a+b') as fp:
fp.write(b':')
fp.write(base64.b64encode(pickle.dumps(
(name, timestamp, deleted),
protocol=PICKLE_PROTOCOL)))
fp.flush()
def put_test(self, name, timestamp):
self._load_item(name, timestamp, 0)
def delete_test(self, name, timestamp):
self._load_item(name, timestamp, 1)
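    # The _load_item helper above mirrors the real brokers' ".pending"
    # mechanism: updates are appended to a side file as ':' + base64(pickle)
    # records and only merged into sqlite later by _commit_puts(). A hedged
    # sketch of reading such a file back (for illustration only):
    #
    #   with open(pending_file, 'rb') as fp:
    #       for entry in fp.read().split(b':'):
    #           if entry:
    #               name, timestamp, deleted = pickle.loads(
    #                   base64.b64decode(entry))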
def _delete_db(self, conn, timestamp):
conn.execute("""
UPDATE test_stat
SET delete_timestamp = ?,
status = 'DELETED',
status_changed_at = ?
WHERE delete_timestamp < ? """, (timestamp, timestamp, timestamp))
def _is_deleted(self, conn):
info = conn.execute('SELECT * FROM test_stat').fetchone()
return (info['test_count'] in (None, '', 0, '0')) and \
(Timestamp(info['delete_timestamp']) >
Timestamp(info['put_timestamp']))
class TestExampleBroker(TestDbBase):
"""
    Tests that use the mostly-concrete ExampleBroker to exercise some of
    the abstract methods on DatabaseBroker.
"""
broker_class = ExampleBroker
policy = 0
server_type = 'example'
def setUp(self):
super(TestExampleBroker, self).setUp()
self.ts = make_timestamp_iter()
def test_delete_db(self):
broker = self.broker_class(self.db_path, account='a', container='c')
broker.initialize(next(self.ts).internal)
broker.delete_db(next(self.ts).internal)
self.assertTrue(broker.is_deleted())
def test_merge_timestamps_simple_delete(self):
put_timestamp = next(self.ts).internal
broker = self.broker_class(self.db_path, account='a', container='c')
broker.initialize(put_timestamp)
created_at = broker.get_info()['created_at']
broker.merge_timestamps(created_at, put_timestamp, '0')
info = broker.get_info()
self.assertEqual(info['created_at'], created_at)
self.assertEqual(info['put_timestamp'], put_timestamp)
self.assertEqual(info['delete_timestamp'], '0')
self.assertEqual(info['status_changed_at'], put_timestamp)
# delete
delete_timestamp = next(self.ts).internal
broker.merge_timestamps(created_at, put_timestamp, delete_timestamp)
self.assertTrue(broker.is_deleted())
info = broker.get_info()
self.assertEqual(info['created_at'], created_at)
self.assertEqual(info['put_timestamp'], put_timestamp)
self.assertEqual(info['delete_timestamp'], delete_timestamp)
self.assertTrue(info['status_changed_at'] > Timestamp(put_timestamp))
def put_item(self, broker, timestamp):
broker.put_test('test', timestamp)
def delete_item(self, broker, timestamp):
broker.delete_test('test', timestamp)
def test_merge_timestamps_delete_with_objects(self):
put_timestamp = next(self.ts).internal
broker = self.broker_class(self.db_path, account='a', container='c')
broker.initialize(put_timestamp, storage_policy_index=int(self.policy))
created_at = broker.get_info()['created_at']
broker.merge_timestamps(created_at, put_timestamp, '0')
info = broker.get_info()
self.assertEqual(info['created_at'], created_at)
self.assertEqual(info['put_timestamp'], put_timestamp)
self.assertEqual(info['delete_timestamp'], '0')
self.assertEqual(info['status_changed_at'], put_timestamp)
# add object
self.put_item(broker, next(self.ts).internal)
self.assertEqual(broker.get_info()[
'%s_count' % broker.db_contains_type], 1)
# delete
delete_timestamp = next(self.ts).internal
broker.merge_timestamps(created_at, put_timestamp, delete_timestamp)
self.assertFalse(broker.is_deleted())
info = broker.get_info()
self.assertEqual(info['created_at'], created_at)
self.assertEqual(info['put_timestamp'], put_timestamp)
self.assertEqual(info['delete_timestamp'], delete_timestamp)
# status is unchanged
self.assertEqual(info['status_changed_at'], put_timestamp)
# count is causing status to hold on
self.delete_item(broker, next(self.ts).internal)
self.assertEqual(broker.get_info()[
'%s_count' % broker.db_contains_type], 0)
self.assertTrue(broker.is_deleted())
def test_merge_timestamps_simple_recreate(self):
put_timestamp = next(self.ts).internal
broker = self.broker_class(self.db_path, account='a', container='c')
broker.initialize(put_timestamp, storage_policy_index=int(self.policy))
virgin_status_changed_at = broker.get_info()['status_changed_at']
created_at = broker.get_info()['created_at']
delete_timestamp = next(self.ts).internal
broker.merge_timestamps(created_at, put_timestamp, delete_timestamp)
self.assertTrue(broker.is_deleted())
info = broker.get_info()
self.assertEqual(info['created_at'], created_at)
self.assertEqual(info['put_timestamp'], put_timestamp)
self.assertEqual(info['delete_timestamp'], delete_timestamp)
orig_status_changed_at = info['status_changed_at']
self.assertTrue(orig_status_changed_at >
Timestamp(virgin_status_changed_at))
# recreate
recreate_timestamp = next(self.ts).internal
status_changed_at = time.time()
with patch('swift.common.db.time.time', new=lambda: status_changed_at):
broker.merge_timestamps(created_at, recreate_timestamp, '0')
self.assertFalse(broker.is_deleted())
info = broker.get_info()
self.assertEqual(info['created_at'], created_at)
self.assertEqual(info['put_timestamp'], recreate_timestamp)
self.assertEqual(info['delete_timestamp'], delete_timestamp)
self.assertEqual(Timestamp(status_changed_at).normal,
info['status_changed_at'])
def test_merge_timestamps_recreate_with_objects(self):
put_timestamp = next(self.ts).internal
broker = self.broker_class(self.db_path, account='a', container='c')
broker.initialize(put_timestamp, storage_policy_index=int(self.policy))
created_at = broker.get_info()['created_at']
# delete
delete_timestamp = next(self.ts).internal
broker.merge_timestamps(created_at, put_timestamp, delete_timestamp)
self.assertTrue(broker.is_deleted())
info = broker.get_info()
self.assertEqual(info['created_at'], created_at)
self.assertEqual(info['put_timestamp'], put_timestamp)
self.assertEqual(info['delete_timestamp'], delete_timestamp)
orig_status_changed_at = info['status_changed_at']
self.assertTrue(Timestamp(orig_status_changed_at) >=
Timestamp(put_timestamp))
# add object
self.put_item(broker, next(self.ts).internal)
count_key = '%s_count' % broker.db_contains_type
self.assertEqual(broker.get_info()[count_key], 1)
self.assertFalse(broker.is_deleted())
# recreate
recreate_timestamp = next(self.ts).internal
broker.merge_timestamps(created_at, recreate_timestamp, '0')
self.assertFalse(broker.is_deleted())
info = broker.get_info()
self.assertEqual(info['created_at'], created_at)
self.assertEqual(info['put_timestamp'], recreate_timestamp)
self.assertEqual(info['delete_timestamp'], delete_timestamp)
self.assertEqual(info['status_changed_at'], orig_status_changed_at)
# count is not causing status to hold on
self.delete_item(broker, next(self.ts).internal)
self.assertFalse(broker.is_deleted())
def test_merge_timestamps_update_put_no_status_change(self):
put_timestamp = next(self.ts).internal
broker = self.broker_class(self.db_path, account='a', container='c')
broker.initialize(put_timestamp, storage_policy_index=int(self.policy))
info = broker.get_info()
orig_status_changed_at = info['status_changed_at']
created_at = info['created_at']
new_put_timestamp = next(self.ts).internal
broker.merge_timestamps(created_at, new_put_timestamp, '0')
info = broker.get_info()
self.assertEqual(new_put_timestamp, info['put_timestamp'])
self.assertEqual(orig_status_changed_at, info['status_changed_at'])
def test_merge_timestamps_update_delete_no_status_change(self):
put_timestamp = next(self.ts).internal
broker = self.broker_class(self.db_path, account='a', container='c')
broker.initialize(put_timestamp, storage_policy_index=int(self.policy))
created_at = broker.get_info()['created_at']
broker.merge_timestamps(created_at, put_timestamp,
next(self.ts).internal)
orig_status_changed_at = broker.get_info()['status_changed_at']
new_delete_timestamp = next(self.ts).internal
broker.merge_timestamps(created_at, put_timestamp,
new_delete_timestamp)
info = broker.get_info()
self.assertEqual(new_delete_timestamp, info['delete_timestamp'])
self.assertEqual(orig_status_changed_at, info['status_changed_at'])
def test_get_max_row(self):
broker = self.broker_class(self.db_path, account='a', container='c')
broker.initialize(next(self.ts).internal,
storage_policy_index=int(self.policy))
self.assertEqual(-1, broker.get_max_row())
self.put_item(broker, next(self.ts).internal)
# commit pending file into db
broker._commit_puts()
self.assertEqual(1, broker.get_max_row())
self.delete_item(broker, next(self.ts).internal)
broker._commit_puts()
self.assertEqual(2, broker.get_max_row())
self.put_item(broker, next(self.ts).internal)
broker._commit_puts()
self.assertEqual(3, broker.get_max_row())
def test_get_info(self):
broker = self.broker_class(self.db_path, account='test', container='c')
created_at = time.time()
with patch('swift.common.db.time.time', new=lambda: created_at):
broker.initialize(Timestamp(1).internal,
storage_policy_index=int(self.policy))
info = broker.get_info()
count_key = '%s_count' % broker.db_contains_type
expected = {
count_key: 0,
'created_at': Timestamp(created_at).internal,
'put_timestamp': Timestamp(1).internal,
'status_changed_at': Timestamp(1).internal,
'delete_timestamp': '0',
}
for k, v in expected.items():
self.assertEqual(info[k], v,
'mismatch for %s, %s != %s' % (
k, info[k], v))
def test_get_raw_metadata(self):
broker = self.broker_class(self.db_path, account='test', container='c')
broker.initialize(Timestamp(0).internal,
storage_policy_index=int(self.policy))
self.assertEqual(broker.metadata, {})
self.assertEqual(broker.get_raw_metadata(), '')
# This is not obvious. The actual JSON in the database is the same:
# '{"test\\u062a": ["value\\u062a", "0000000001.00000"]}'
# The only difference is what reading it produces on py2 and py3.
# We use native strings for metadata (see native_str_keys_and_values),
# so types are different.
if six.PY2:
key = u'test\u062a'.encode('utf-8')
value = u'value\u062a'.encode('utf-8')
else:
key = u'test\u062a'
value = u'value\u062a'
metadata = {
key: [value, Timestamp(1).internal]
}
broker.update_metadata(metadata)
self.assertEqual(broker.metadata, metadata)
self.assertEqual(broker.get_raw_metadata(),
json.dumps(metadata))
def test_put_timestamp(self):
broker = self.broker_class(self.db_path, account='a', container='c')
orig_put_timestamp = next(self.ts).internal
broker.initialize(orig_put_timestamp,
storage_policy_index=int(self.policy))
self.assertEqual(broker.get_info()['put_timestamp'],
orig_put_timestamp)
# put_timestamp equal - no change
broker.update_put_timestamp(orig_put_timestamp)
self.assertEqual(broker.get_info()['put_timestamp'],
orig_put_timestamp)
# put_timestamp newer - gets newer
newer_put_timestamp = next(self.ts).internal
broker.update_put_timestamp(newer_put_timestamp)
self.assertEqual(broker.get_info()['put_timestamp'],
newer_put_timestamp)
# put_timestamp older - no change
broker.update_put_timestamp(orig_put_timestamp)
self.assertEqual(broker.get_info()['put_timestamp'],
newer_put_timestamp)
def test_status_changed_at(self):
broker = self.broker_class(self.db_path, account='test', container='c')
put_timestamp = next(self.ts).internal
created_at = time.time()
with patch('swift.common.db.time.time', new=lambda: created_at):
broker.initialize(put_timestamp,
storage_policy_index=int(self.policy))
self.assertEqual(broker.get_info()['status_changed_at'],
put_timestamp)
self.assertEqual(broker.get_info()['created_at'],
Timestamp(created_at).internal)
status_changed_at = next(self.ts).internal
broker.update_status_changed_at(status_changed_at)
self.assertEqual(broker.get_info()['status_changed_at'],
status_changed_at)
# save the old and get a new status_changed_at
old_status_changed_at, status_changed_at = \
status_changed_at, next(self.ts).internal
broker.update_status_changed_at(status_changed_at)
self.assertEqual(broker.get_info()['status_changed_at'],
status_changed_at)
# status changed at won't go backwards...
broker.update_status_changed_at(old_status_changed_at)
self.assertEqual(broker.get_info()['status_changed_at'],
status_changed_at)
def test_get_syncs(self):
broker = self.broker_class(self.db_path, account='a', container='c')
broker.initialize(Timestamp.now().internal,
storage_policy_index=int(self.policy))
self.assertEqual([], broker.get_syncs())
broker.merge_syncs([{'sync_point': 1, 'remote_id': 'remote1'}])
self.assertEqual([{'sync_point': 1, 'remote_id': 'remote1'}],
broker.get_syncs())
self.assertEqual([], broker.get_syncs(incoming=False))
broker.merge_syncs([{'sync_point': 2, 'remote_id': 'remote2'}],
incoming=False)
self.assertEqual([{'sync_point': 2, 'remote_id': 'remote2'}],
broker.get_syncs(incoming=False))
def test_commit_pending(self):
broker = self.broker_class(os.path.join(self.testdir, 'test.db'),
account='a', container='c')
broker.initialize(next(self.ts).internal,
storage_policy_index=int(self.policy))
self.put_item(broker, next(self.ts).internal)
qry = 'select * from %s_stat' % broker.db_type
with broker.get() as conn:
rows = [dict(x) for x in conn.execute(qry)]
info = rows[0]
count_key = '%s_count' % broker.db_contains_type
self.assertEqual(0, info[count_key])
# commit pending file into db
broker._commit_puts()
self.assertEqual(1, broker.get_info()[count_key])
def test_maybe_get(self):
broker = self.broker_class(os.path.join(self.testdir, 'test.db'),
account='a', container='c')
broker.initialize(next(self.ts).internal,
storage_policy_index=int(self.policy))
qry = 'select account from %s_stat' % broker.db_type
with broker.maybe_get(None) as conn:
rows = [dict(x) for x in conn.execute(qry)]
self.assertEqual([{'account': 'a'}], rows)
self.assertEqual(conn, broker.conn)
with broker.get() as other_conn:
self.assertEqual(broker.conn, None)
with broker.maybe_get(other_conn) as identity_conn:
self.assertIs(other_conn, identity_conn)
self.assertEqual(broker.conn, None)
self.assertEqual(broker.conn, None)
self.assertEqual(broker.conn, conn)
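    # As exercised above, maybe_get() either yields the connection it was
    # handed (leaving broker.conn untouched) or, when given None, opens a
    # connection and parks it back on broker.conn for reuse afterwards;
    # that is what the broker.conn assertions in test_maybe_get check.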
class TestDatabaseBroker(TestDbBase):
def test_DB_PREALLOCATION_setting(self):
u = uuid4().hex
b = DatabaseBroker(u)
swift.common.db.DB_PREALLOCATION = False
b._preallocate()
swift.common.db.DB_PREALLOCATION = True
self.assertRaises(OSError, b._preallocate)
def test_memory_db_init(self):
broker = DatabaseBroker(self.db_path)
self.assertEqual(broker.db_file, self.db_path)
self.assertRaises(AttributeError, broker.initialize,
normalize_timestamp('0'))
def test_disk_db_init(self):
db_file = os.path.join(self.testdir, '1.db')
broker = DatabaseBroker(db_file)
self.assertEqual(broker.db_file, db_file)
self.assertIsNone(broker.conn)
def test_disk_preallocate(self):
test_size = [-1]
def fallocate_stub(fd, size):
test_size[0] = size
with patch('swift.common.db.fallocate', fallocate_stub):
db_file = os.path.join(self.testdir, 'pre.db')
# Write 1 byte and hope that the fs will allocate less than 1 MB.
f = open(db_file, "w")
f.write('@')
f.close()
b = DatabaseBroker(db_file)
b._preallocate()
            # We only wrote 1 byte, so we should end up at the first step, 1 MB.
self.assertEqual(test_size[0], 1024 * 1024)
def test_initialize(self):
self.assertRaises(AttributeError,
DatabaseBroker(self.db_path).initialize,
normalize_timestamp('1'))
stub_dict = {}
def stub(*args, **kwargs):
stub_dict.clear()
stub_dict['args'] = args
stub_dict.update(kwargs)
broker = DatabaseBroker(self.db_path)
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
self.assertTrue(hasattr(stub_dict['args'][0], 'execute'))
self.assertEqual(stub_dict['args'][1], '0000000001.00000')
with broker.get() as conn:
conn.execute('SELECT * FROM outgoing_sync')
conn.execute('SELECT * FROM incoming_sync')
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
self.assertTrue(hasattr(stub_dict['args'][0], 'execute'))
self.assertEqual(stub_dict['args'][1], '0000000001.00000')
with broker.get() as conn:
conn.execute('SELECT * FROM outgoing_sync')
conn.execute('SELECT * FROM incoming_sync')
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
broker._initialize = stub
self.assertRaises(DatabaseAlreadyExists,
broker.initialize, normalize_timestamp('1'))
def test_delete_db(self):
meta = {'foo': ['bar', normalize_timestamp('0')]}
def init_stub(conn, put_timestamp, **kwargs):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('''CREATE TABLE test_stat (
id TEXT, put_timestamp TEXT, delete_timestamp TEXT,
status TEXT, status_changed_at TEXT, metadata TEXT)''')
conn.execute(
'''INSERT INTO test_stat (
id, put_timestamp, delete_timestamp, status,
status_changed_at, metadata) VALUES (?, ?, ?, ?, ?, ?)''',
(str(uuid4), put_timestamp, '0', '', '0', json.dumps(meta)))
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.commit()
def do_test(expected_metadata, delete_meta_whitelist=None):
if not delete_meta_whitelist:
delete_meta_whitelist = []
broker = DatabaseBroker(self.get_db_path())
broker.delete_meta_whitelist = delete_meta_whitelist
broker.db_type = 'test'
broker._initialize = init_stub
# Initializes a good broker for us
broker.initialize(normalize_timestamp('1'))
info = broker.get_info()
self.assertEqual('0', info['delete_timestamp'])
self.assertEqual('', info['status'])
self.assertIsNotNone(broker.conn)
broker.delete_db(normalize_timestamp('2'))
info = broker.get_info()
self.assertEqual(normalize_timestamp('2'),
info['delete_timestamp'])
self.assertEqual('DELETED', info['status'])
# check meta
m2 = broker.metadata
self.assertEqual(m2, expected_metadata)
broker = DatabaseBroker(os.path.join(self.testdir,
'%s.db' % uuid4()))
broker.delete_meta_whitelist = delete_meta_whitelist
broker.db_type = 'test'
broker._initialize = init_stub
broker.initialize(normalize_timestamp('1'))
info = broker.get_info()
self.assertEqual('0', info['delete_timestamp'])
self.assertEqual('', info['status'])
broker.delete_db(normalize_timestamp('2'))
info = broker.get_info()
self.assertEqual(normalize_timestamp('2'),
info['delete_timestamp'])
self.assertEqual('DELETED', info['status'])
# check meta
m2 = broker.metadata
self.assertEqual(m2, expected_metadata)
# ensure that metadata was cleared by default
do_test({'foo': ['', normalize_timestamp('2')]})
        # If the meta is in the broker's delete_meta_whitelist it won't get
        # cleared up
do_test(meta, ['foo'])
        # delete_meta_whitelist entries need to be in lower case, as the keys
        # are lower()'ed before being checked
meta["X-Container-Meta-Test"] = ['value', normalize_timestamp('0')]
meta["X-Something-else"] = ['other', normalize_timestamp('0')]
do_test({'foo': ['', normalize_timestamp('2')],
'X-Container-Meta-Test': ['value', normalize_timestamp('0')],
'X-Something-else': ['other', normalize_timestamp('0')]},
['x-container-meta-test', 'x-something-else'])
def test_get(self):
broker = DatabaseBroker(self.db_path)
with self.assertRaises(DatabaseConnectionError) as raised, \
broker.get() as conn:
conn.execute('SELECT 1')
self.assertEqual(
str(raised.exception),
"DB connection error (%s, 0):\nDB doesn't exist" % self.db_path)
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
with self.assertRaises(DatabaseConnectionError) as raised, \
broker.get() as conn:
conn.execute('SELECT 1')
self.assertEqual(
str(raised.exception),
"DB connection error (%s, 0):\nDB doesn't exist" % broker.db_file)
def stub(*args, **kwargs):
pass
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
conn.execute('CREATE TABLE test (one TEXT)')
try:
with broker.get() as conn:
conn.execute('INSERT INTO test (one) VALUES ("1")')
raise Exception('test')
conn.commit()
except Exception:
pass
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
with broker.get() as conn:
self.assertEqual(
[r[0] for r in conn.execute('SELECT * FROM test')], [])
with broker.get() as conn:
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.commit()
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
with broker.get() as conn:
self.assertEqual(
[r[0] for r in conn.execute('SELECT * FROM test')], ['1'])
dbpath = os.path.join(self.testdir, 'dev', 'dbs', 'par', 'pre', 'db')
mkdirs(dbpath)
qpath = os.path.join(self.testdir, 'dev', 'quarantined', 'tests', 'db')
with patch('swift.common.db.renamer', lambda a, b,
fsync: b):
# Test malformed database
copy(os.path.join(os.path.dirname(__file__),
'malformed_example.db'),
os.path.join(dbpath, '1.db'))
broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
broker.db_type = 'test'
with self.assertRaises(sqlite3.DatabaseError) as raised, \
broker.get() as conn:
conn.execute('SELECT * FROM test')
self.assertEqual(
str(raised.exception),
'Quarantined %s to %s due to malformed database' %
(dbpath, qpath))
# Test malformed schema database
copy(os.path.join(os.path.dirname(__file__),
'malformed_schema_example.db'),
os.path.join(dbpath, '1.db'))
broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
broker.db_type = 'test'
with self.assertRaises(sqlite3.DatabaseError) as raised, \
broker.get() as conn:
conn.execute('SELECT * FROM test')
self.assertEqual(
str(raised.exception),
'Quarantined %s to %s due to malformed database' %
(dbpath, qpath))
# Test corrupted database
copy(os.path.join(os.path.dirname(__file__),
'corrupted_example.db'),
os.path.join(dbpath, '1.db'))
broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
broker.db_type = 'test'
with self.assertRaises(sqlite3.DatabaseError) as raised, \
broker.get() as conn:
conn.execute('SELECT * FROM test')
self.assertEqual(
str(raised.exception),
'Quarantined %s to %s due to corrupted database' %
(dbpath, qpath))
def test_get_raw_metadata_missing_container_info(self):
# Test missing container_info/container_stat row
dbpath = os.path.join(self.testdir, 'dev', 'dbs', 'par', 'pre', 'db')
mkdirs(dbpath)
qpath = os.path.join(self.testdir, 'dev', 'quarantined', 'containers',
'db')
copy(os.path.join(os.path.dirname(__file__),
'missing_container_info.db'),
os.path.join(dbpath, '1.db'))
broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
broker.db_type = 'container'
with self.assertRaises(sqlite3.DatabaseError) as raised:
broker.get_raw_metadata()
self.assertEqual(
str(raised.exception),
'Quarantined %s to %s due to missing row in container_stat table' %
(dbpath, qpath))
def test_lock(self):
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'), timeout=.1)
with self.assertRaises(DatabaseConnectionError) as raised, \
broker.lock():
pass
self.assertEqual(
str(raised.exception),
"DB connection error (%s, 0):\nDB doesn't exist" % broker.db_file)
def stub(*args, **kwargs):
pass
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
with broker.lock():
pass
with broker.lock():
pass
with self.assertRaises(RuntimeError) as raised, broker.lock():
raise RuntimeError('boom!')
self.assertEqual(raised.exception.args[0], 'boom!')
broker2 = DatabaseBroker(os.path.join(self.testdir, '1.db'),
timeout=.1)
broker2._initialize = stub
with broker.lock():
# broker2 raises the timeout
with self.assertRaises(LockTimeout) as raised:
with broker2.lock():
pass
self.assertEqual(str(raised.exception),
'0.1 seconds: %s' % broker.db_file)
# and the timeout bubbles up out of broker.lock()
with self.assertRaises(LockTimeout) as raised:
with broker.lock():
with broker2.lock():
pass
self.assertEqual(str(raised.exception),
'0.1 seconds: %s' % broker.db_file)
try:
with broker.lock():
raise Exception('test')
except Exception:
pass
with broker.lock():
pass
def test_newid(self):
broker = DatabaseBroker(self.db_path)
broker.db_type = 'test'
broker.db_contains_type = 'test'
uuid1 = str(uuid4())
def _initialize(conn, timestamp, **kwargs):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('CREATE TABLE test_stat (id TEXT)')
conn.execute('INSERT INTO test_stat (id) VALUES (?)', (uuid1,))
conn.commit()
broker._initialize = _initialize
broker.initialize(normalize_timestamp('1'))
uuid2 = str(uuid4())
broker.newid(uuid2)
with broker.get() as conn:
uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
self.assertEqual(len(uuids), 1)
self.assertNotEqual(uuids[0], uuid1)
uuid1 = uuids[0]
points = [(r[0], r[1]) for r in conn.execute(
'SELECT sync_point, '
'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid2,))]
self.assertEqual(len(points), 1)
self.assertEqual(points[0][0], -1)
self.assertEqual(points[0][1], uuid2)
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.commit()
uuid3 = str(uuid4())
broker.newid(uuid3)
with broker.get() as conn:
uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
self.assertEqual(len(uuids), 1)
self.assertNotEqual(uuids[0], uuid1)
uuid1 = uuids[0]
points = [(r[0], r[1]) for r in conn.execute(
'SELECT sync_point, '
'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid3,))]
self.assertEqual(len(points), 1)
self.assertEqual(points[0][1], uuid3)
broker.newid(uuid2)
with broker.get() as conn:
uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
self.assertEqual(len(uuids), 1)
self.assertNotEqual(uuids[0], uuid1)
points = [(r[0], r[1]) for r in conn.execute(
'SELECT sync_point, '
'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid2,))]
self.assertEqual(len(points), 1)
self.assertEqual(points[0][1], uuid2)
def test_get_items_since(self):
broker = DatabaseBroker(self.db_path)
broker.db_type = 'test'
broker.db_contains_type = 'test'
def _initialize(conn, timestamp, **kwargs):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.execute('INSERT INTO test (one) VALUES ("2")')
conn.execute('INSERT INTO test (one) VALUES ("3")')
conn.commit()
broker._initialize = _initialize
broker.initialize(normalize_timestamp('1'))
self.assertEqual(broker.get_items_since(-1, 10),
[{'one': '1'}, {'one': '2'}, {'one': '3'}])
self.assertEqual(broker.get_items_since(-1, 2),
[{'one': '1'}, {'one': '2'}])
self.assertEqual(broker.get_items_since(1, 2),
[{'one': '2'}, {'one': '3'}])
self.assertEqual(broker.get_items_since(3, 2), [])
self.assertEqual(broker.get_items_since(999, 2), [])
def test_get_sync(self):
broker = DatabaseBroker(self.db_path)
broker.db_type = 'test'
broker.db_contains_type = 'test'
uuid1 = str(uuid4())
def _initialize(conn, timestamp, **kwargs):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('CREATE TABLE test_stat (id TEXT)')
conn.execute('INSERT INTO test_stat (id) VALUES (?)', (uuid1,))
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.commit()
pass
broker._initialize = _initialize
broker.initialize(normalize_timestamp('1'))
uuid2 = str(uuid4())
self.assertEqual(broker.get_sync(uuid2), -1)
broker.newid(uuid2)
self.assertEqual(broker.get_sync(uuid2), 1)
uuid3 = str(uuid4())
self.assertEqual(broker.get_sync(uuid3), -1)
with broker.get() as conn:
conn.execute('INSERT INTO test (one) VALUES ("2")')
conn.commit()
broker.newid(uuid3)
self.assertEqual(broker.get_sync(uuid2), 1)
self.assertEqual(broker.get_sync(uuid3), 2)
self.assertEqual(broker.get_sync(uuid2, incoming=False), -1)
self.assertEqual(broker.get_sync(uuid3, incoming=False), -1)
broker.merge_syncs([{'sync_point': 1, 'remote_id': uuid2}],
incoming=False)
self.assertEqual(broker.get_sync(uuid2), 1)
self.assertEqual(broker.get_sync(uuid3), 2)
self.assertEqual(broker.get_sync(uuid2, incoming=False), 1)
self.assertEqual(broker.get_sync(uuid3, incoming=False), -1)
broker.merge_syncs([{'sync_point': 2, 'remote_id': uuid3}],
incoming=False)
self.assertEqual(broker.get_sync(uuid2, incoming=False), 1)
self.assertEqual(broker.get_sync(uuid3, incoming=False), 2)
def test_merge_syncs(self):
broker = DatabaseBroker(self.db_path)
def stub(*args, **kwargs):
pass
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
uuid2 = str(uuid4())
broker.merge_syncs([{'sync_point': 1, 'remote_id': uuid2}])
self.assertEqual(broker.get_sync(uuid2), 1)
uuid3 = str(uuid4())
broker.merge_syncs([{'sync_point': 2, 'remote_id': uuid3}])
self.assertEqual(broker.get_sync(uuid2), 1)
self.assertEqual(broker.get_sync(uuid3), 2)
self.assertEqual(broker.get_sync(uuid2, incoming=False), -1)
self.assertEqual(broker.get_sync(uuid3, incoming=False), -1)
broker.merge_syncs([{'sync_point': 3, 'remote_id': uuid2},
{'sync_point': 4, 'remote_id': uuid3}],
incoming=False)
self.assertEqual(broker.get_sync(uuid2, incoming=False), 3)
self.assertEqual(broker.get_sync(uuid3, incoming=False), 4)
self.assertEqual(broker.get_sync(uuid2), 1)
self.assertEqual(broker.get_sync(uuid3), 2)
broker.merge_syncs([{'sync_point': 5, 'remote_id': uuid2}])
self.assertEqual(broker.get_sync(uuid2), 5)
# max sync point sticks
broker.merge_syncs([{'sync_point': 5, 'remote_id': uuid2}])
self.assertEqual(broker.get_sync(uuid2), 5)
self.assertEqual(broker.get_sync(uuid3), 2)
broker.merge_syncs([{'sync_point': 4, 'remote_id': uuid2}])
self.assertEqual(broker.get_sync(uuid2), 5)
self.assertEqual(broker.get_sync(uuid3), 2)
broker.merge_syncs([{'sync_point': -1, 'remote_id': uuid2},
{'sync_point': 3, 'remote_id': uuid3}])
self.assertEqual(broker.get_sync(uuid2), 5)
self.assertEqual(broker.get_sync(uuid3), 3)
self.assertEqual(broker.get_sync(uuid2, incoming=False), 3)
self.assertEqual(broker.get_sync(uuid3, incoming=False), 4)
def test_get_replication_info(self):
self.get_replication_info_tester(metadata=False)
def test_get_replication_info_with_metadata(self):
self.get_replication_info_tester(metadata=True)
def get_replication_info_tester(self, metadata=False):
broker = DatabaseBroker(self.db_path, account='a')
broker.db_type = 'test'
broker.db_contains_type = 'test'
broker.db_reclaim_timestamp = 'created_at'
broker_creation = normalize_timestamp(1)
broker_uuid = str(uuid4())
broker_metadata = metadata and json.dumps(
{'Test': ('Value', normalize_timestamp(1))}) or ''
def _initialize(conn, put_timestamp, **kwargs):
if put_timestamp is None:
put_timestamp = normalize_timestamp(0)
conn.executescript('''
CREATE TABLE test (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT UNIQUE,
created_at TEXT
);
CREATE TRIGGER test_insert AFTER INSERT ON test
BEGIN
UPDATE test_stat
SET test_count = test_count + 1,
hash = chexor(hash, new.name, new.created_at);
END;
CREATE TRIGGER test_update BEFORE UPDATE ON test
BEGIN
SELECT RAISE(FAIL,
'UPDATE not allowed; DELETE and INSERT');
END;
CREATE TRIGGER test_delete AFTER DELETE ON test
BEGIN
UPDATE test_stat
SET test_count = test_count - 1,
hash = chexor(hash, old.name, old.created_at);
END;
CREATE TABLE test_stat (
account TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
status_changed_at TEXT DEFAULT '0',
test_count INTEGER,
hash TEXT default '00000000000000000000000000000000',
id TEXT
%s
);
INSERT INTO test_stat (test_count) VALUES (0);
''' % (metadata and ", metadata TEXT DEFAULT ''" or ""))
conn.execute('''
UPDATE test_stat
SET account = ?, created_at = ?, id = ?, put_timestamp = ?,
status_changed_at = ?
''', (broker.account, broker_creation, broker_uuid, put_timestamp,
put_timestamp))
if metadata:
conn.execute('UPDATE test_stat SET metadata = ?',
(broker_metadata,))
conn.commit()
broker._initialize = _initialize
put_timestamp = normalize_timestamp(2)
broker.initialize(put_timestamp)
info = broker.get_replication_info()
self.assertEqual(info, {
'account': broker.account, 'count': 0,
'hash': '00000000000000000000000000000000',
'created_at': broker_creation, 'put_timestamp': put_timestamp,
'delete_timestamp': '0', 'status_changed_at': put_timestamp,
'max_row': -1, 'id': broker_uuid, 'metadata': broker_metadata})
insert_timestamp = normalize_timestamp(3)
with broker.get() as conn:
conn.execute('''
INSERT INTO test (name, created_at) VALUES ('test', ?)
''', (insert_timestamp,))
conn.commit()
info = broker.get_replication_info()
self.assertEqual(info, {
'account': broker.account, 'count': 1,
'hash': 'bdc4c93f574b0d8c2911a27ce9dd38ba',
'created_at': broker_creation, 'put_timestamp': put_timestamp,
'delete_timestamp': '0', 'status_changed_at': put_timestamp,
'max_row': 1, 'id': broker_uuid, 'metadata': broker_metadata})
with broker.get() as conn:
conn.execute('DELETE FROM test')
conn.commit()
info = broker.get_replication_info()
self.assertEqual(info, {
'account': broker.account, 'count': 0,
'hash': '00000000000000000000000000000000',
'created_at': broker_creation, 'put_timestamp': put_timestamp,
'delete_timestamp': '0', 'status_changed_at': put_timestamp,
'max_row': 1, 'id': broker_uuid, 'metadata': broker_metadata})
return broker
# only testing _reclaim_metadata here
@patch.object(TombstoneReclaimer, 'reclaim')
def test_metadata(self, mock_reclaim):
# Initializes a good broker for us
broker = self.get_replication_info_tester(metadata=True)
# Add our first item
first_timestamp = normalize_timestamp(1)
first_value = '1'
broker.update_metadata({'First': [first_value, first_timestamp]})
self.assertIn('First', broker.metadata)
self.assertEqual(broker.metadata['First'],
[first_value, first_timestamp])
# Add our second item
second_timestamp = normalize_timestamp(2)
second_value = '2'
broker.update_metadata({'Second': [second_value, second_timestamp]})
self.assertIn('First', broker.metadata)
self.assertEqual(broker.metadata['First'],
[first_value, first_timestamp])
self.assertIn('Second', broker.metadata)
self.assertEqual(broker.metadata['Second'],
[second_value, second_timestamp])
# Update our first item
first_timestamp = normalize_timestamp(3)
first_value = '1b'
broker.update_metadata({'First': [first_value, first_timestamp]})
self.assertIn('First', broker.metadata)
self.assertEqual(broker.metadata['First'],
[first_value, first_timestamp])
self.assertIn('Second', broker.metadata)
self.assertEqual(broker.metadata['Second'],
[second_value, second_timestamp])
# Delete our second item (by setting to empty string)
second_timestamp = normalize_timestamp(4)
second_value = ''
broker.update_metadata({'Second': [second_value, second_timestamp]})
self.assertIn('First', broker.metadata)
self.assertEqual(broker.metadata['First'],
[first_value, first_timestamp])
self.assertIn('Second', broker.metadata)
self.assertEqual(broker.metadata['Second'],
[second_value, second_timestamp])
# Reclaim at point before second item was deleted
broker.reclaim(normalize_timestamp(3), normalize_timestamp(3))
self.assertIn('First', broker.metadata)
self.assertEqual(broker.metadata['First'],
[first_value, first_timestamp])
self.assertIn('Second', broker.metadata)
self.assertEqual(broker.metadata['Second'],
[second_value, second_timestamp])
# Reclaim at point second item was deleted
broker.reclaim(normalize_timestamp(4), normalize_timestamp(4))
self.assertIn('First', broker.metadata)
self.assertEqual(broker.metadata['First'],
[first_value, first_timestamp])
self.assertIn('Second', broker.metadata)
self.assertEqual(broker.metadata['Second'],
[second_value, second_timestamp])
# Reclaim after point second item was deleted
broker.reclaim(normalize_timestamp(5), normalize_timestamp(5))
self.assertIn('First', broker.metadata)
self.assertEqual(broker.metadata['First'],
[first_value, first_timestamp])
self.assertNotIn('Second', broker.metadata)
# Delete first item (by setting to empty string)
first_timestamp = normalize_timestamp(6)
broker.update_metadata({'First': ['', first_timestamp]})
self.assertIn('First', broker.metadata)
# Check that sync_timestamp doesn't cause item to be reclaimed
broker.reclaim(normalize_timestamp(5), normalize_timestamp(99))
self.assertIn('First', broker.metadata)
def test_update_metadata_missing_container_info(self):
# Test missing container_info/container_stat row
dbpath = os.path.join(self.testdir, 'dev', 'dbs', 'par', 'pre', 'db')
mkdirs(dbpath)
qpath = os.path.join(self.testdir, 'dev', 'quarantined', 'containers',
'db')
copy(os.path.join(os.path.dirname(__file__),
'missing_container_info.db'),
os.path.join(dbpath, '1.db'))
broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
broker.db_type = 'container'
with self.assertRaises(sqlite3.DatabaseError) as raised:
broker.update_metadata({'First': ['1', normalize_timestamp(1)]})
self.assertEqual(
str(raised.exception),
'Quarantined %s to %s due to missing row in container_stat table' %
(dbpath, qpath))
def test_reclaim_missing_container_info(self):
# Test missing container_info/container_stat row
dbpath = os.path.join(self.testdir, 'dev', 'dbs', 'par', 'pre', 'db')
mkdirs(dbpath)
qpath = os.path.join(self.testdir, 'dev', 'quarantined', 'containers',
'db')
copy(os.path.join(os.path.dirname(__file__),
'missing_container_info.db'),
os.path.join(dbpath, '1.db'))
broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
broker.db_type = 'container'
with self.assertRaises(sqlite3.DatabaseError) as raised, \
broker.get() as conn:
broker._reclaim_metadata(conn, 0)
self.assertEqual(
str(raised.exception),
'Quarantined %s to %s due to missing row in container_stat table' %
(dbpath, qpath))
@patch.object(DatabaseBroker, 'validate_metadata')
def test_validate_metadata_is_called_from_update_metadata(self, mock):
broker = self.get_replication_info_tester(metadata=True)
first_timestamp = normalize_timestamp(1)
first_value = '1'
metadata = {'First': [first_value, first_timestamp]}
broker.update_metadata(metadata, validate_metadata=True)
self.assertTrue(mock.called)
@patch.object(DatabaseBroker, 'validate_metadata')
def test_validate_metadata_is_not_called_from_update_metadata(self, mock):
broker = self.get_replication_info_tester(metadata=True)
first_timestamp = normalize_timestamp(1)
first_value = '1'
metadata = {'First': [first_value, first_timestamp]}
broker.update_metadata(metadata)
self.assertFalse(mock.called)
def test_metadata_with_max_count(self):
metadata = {}
for c in range(MAX_META_COUNT):
key = 'X-Account-Meta-F{0}'.format(c)
metadata[key] = ('B', normalize_timestamp(1))
key = 'X-Account-Meta-Foo'
metadata[key] = ('', normalize_timestamp(1))
self.assertIsNone(DatabaseBroker.validate_metadata(metadata))
def test_metadata_raises_exception_on_non_utf8(self):
def try_validate(metadata):
with self.assertRaises(HTTPException) as raised:
DatabaseBroker.validate_metadata(metadata)
self.assertEqual(str(raised.exception), '400 Bad Request')
ts = normalize_timestamp(1)
try_validate({'X-Account-Meta-Foo': (b'\xff', ts)})
try_validate({b'X-Container-Meta-\xff': ('bar', ts)})
def test_metadata_raises_exception_over_max_count(self):
metadata = {}
for c in range(MAX_META_COUNT + 1):
key = 'X-Account-Meta-F{0}'.format(c)
metadata[key] = ('B', normalize_timestamp(1))
message = ''
try:
DatabaseBroker.validate_metadata(metadata)
except HTTPException as e:
message = str(e)
self.assertEqual(message, '400 Bad Request')
def test_metadata_with_max_overall_size(self):
metadata = {}
metadata_value = 'v' * MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < (MAX_META_OVERALL_SIZE - 4
- MAX_META_VALUE_LENGTH):
size += 4 + MAX_META_VALUE_LENGTH
metadata['X-Account-Meta-%04d' % x] = (metadata_value,
normalize_timestamp(1))
x += 1
if MAX_META_OVERALL_SIZE - size > 1:
metadata['X-Account-Meta-k'] = (
'v' * (MAX_META_OVERALL_SIZE - size - 1),
normalize_timestamp(1))
self.assertIsNone(DatabaseBroker.validate_metadata(metadata))
def test_metadata_raises_exception_over_max_overall_size(self):
metadata = {}
metadata_value = 'k' * MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < (MAX_META_OVERALL_SIZE - 4
- MAX_META_VALUE_LENGTH):
size += 4 + MAX_META_VALUE_LENGTH
metadata['X-Account-Meta-%04d' % x] = (metadata_value,
normalize_timestamp(1))
x += 1
if MAX_META_OVERALL_SIZE - size > 1:
metadata['X-Account-Meta-k'] = (
'v' * (MAX_META_OVERALL_SIZE - size - 1),
normalize_timestamp(1))
metadata['X-Account-Meta-k2'] = ('v', normalize_timestamp(1))
message = ''
try:
DatabaseBroker.validate_metadata(metadata)
except HTTPException as e:
message = str(e)
self.assertEqual(message, '400 Bad Request')
def test_possibly_quarantine_db_errors(self):
dbpath = os.path.join(self.testdir, 'dev', 'dbs', 'par', 'pre', 'db')
qpath = os.path.join(self.testdir, 'dev', 'quarantined', 'tests', 'db')
        # Data is a list of Exceptions to be raised and the hint expected in
        # the resulting quarantine message
data = [
(sqlite3.DatabaseError('database disk image is malformed'),
'malformed'),
(sqlite3.DatabaseError('malformed database schema'), 'malformed'),
(sqlite3.DatabaseError('file is encrypted or is not a database'),
'corrupted'),
(sqlite3.OperationalError('disk I/O error'),
'disk error while accessing')]
for i, (ex, hint) in enumerate(data):
mkdirs(dbpath)
broker = DatabaseBroker(os.path.join(dbpath, '%d.db' % (i)))
broker.db_type = 'test'
try:
raise ex
except sqlite3.DatabaseError:
with self.assertRaises(sqlite3.DatabaseError) as raised:
broker.possibly_quarantine(*sys.exc_info())
self.assertEqual(
str(raised.exception),
'Quarantined %s to %s due to %s database' %
(dbpath, qpath, hint))
def test_skip_commits(self):
broker = DatabaseBroker(self.db_path)
self.assertTrue(broker._skip_commit_puts())
broker._initialize = MagicMock()
broker.initialize(Timestamp.now())
self.assertTrue(broker._skip_commit_puts())
# not initialized
db_file = os.path.join(self.testdir, '1.db')
broker = DatabaseBroker(db_file)
self.assertFalse(os.path.exists(broker.db_file)) # sanity check
self.assertTrue(broker._skip_commit_puts())
# no pending file
broker._initialize = MagicMock()
broker.initialize(Timestamp.now())
self.assertTrue(os.path.exists(broker.db_file)) # sanity check
self.assertFalse(os.path.exists(broker.pending_file)) # sanity check
self.assertTrue(broker._skip_commit_puts())
# pending file exists
with open(broker.pending_file, 'wb'):
pass
self.assertTrue(os.path.exists(broker.pending_file)) # sanity check
self.assertFalse(broker._skip_commit_puts())
# skip_commits is True
broker.skip_commits = True
self.assertTrue(broker._skip_commit_puts())
# re-init
broker = DatabaseBroker(db_file)
self.assertFalse(broker._skip_commit_puts())
# constructor can override
broker = DatabaseBroker(db_file, skip_commits=True)
self.assertTrue(broker._skip_commit_puts())
def test_commit_puts(self):
db_file = os.path.join(self.testdir, '1.db')
broker = DatabaseBroker(db_file)
broker._initialize = MagicMock()
broker.initialize(Timestamp.now())
with open(broker.pending_file, 'wb'):
pass
# merge given list
with patch.object(broker, 'merge_items') as mock_merge_items:
broker._commit_puts(['test'])
mock_merge_items.assert_called_once_with(['test'])
# load file and merge
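        # (each record in the pending file is stored as b':' followed by the
        # base64-encoded pickle of the record)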
with open(broker.pending_file, 'wb') as fd:
for v in (1, 2, 99):
fd.write(b':' + base64.b64encode(pickle.dumps(
v, protocol=PICKLE_PROTOCOL)))
with patch.object(broker, 'merge_items') as mock_merge_items:
broker._commit_puts_load = lambda l, e: l.append(e)
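            # stub _commit_puts_load so each decoded pending entry is simply
            # appended to the given list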
broker._commit_puts()
mock_merge_items.assert_called_once_with([1, 2, 99])
self.assertEqual(0, os.path.getsize(broker.pending_file))
# load file and merge with given list
with open(broker.pending_file, 'wb') as fd:
fd.write(b':' + base64.b64encode(pickle.dumps(
b'bad', protocol=PICKLE_PROTOCOL)))
with patch.object(broker, 'merge_items') as mock_merge_items:
broker._commit_puts_load = lambda l, e: l.append(e)
broker._commit_puts([b'not'])
mock_merge_items.assert_called_once_with([b'not', b'bad'])
self.assertEqual(0, os.path.getsize(broker.pending_file))
# load a pending entry that's caused trouble in py2/py3 upgrade tests
# can't quite figure out how it got generated, though, so hard-code it
with open(broker.pending_file, 'wb') as fd:
fd.write(b':gAIoVS3olIngpILrjIvrjIvpkIngpIHlmIjlmIbjnIbgp'
b'IPjnITimIPvhI/rjI3tiI5xAVUQMTU1OTI0MTg0Ni40NjY'
b'wMXECVQEwVQEwVQEwSwBVATB0Lg==')
with patch.object(broker, 'merge_items') as mock_merge_items:
broker._commit_puts_load = lambda l, e: l.append(e)
broker._commit_puts([])
expected_name = (u'\u8509\u0902\ub30b\ub30b\u9409\u0901\u5608\u5606'
u'\u3706\u0903\u3704\u2603\uf10f\ub30d\ud20e')
if six.PY2:
expected_name = expected_name.encode('utf8')
mock_merge_items.assert_called_once_with([
(expected_name, '1559241846.46601', '0', '0', '0', 0, '0')])
self.assertEqual(0, os.path.getsize(broker.pending_file))
# skip_commits True - no merge
db_file = os.path.join(self.testdir, '2.db')
broker = DatabaseBroker(db_file, skip_commits=True)
broker._initialize = MagicMock()
broker.initialize(Timestamp.now())
with open(broker.pending_file, 'wb') as fd:
fd.write(b':ignored')
with patch.object(broker, 'merge_items') as mock_merge_items:
with self.assertRaises(DatabaseConnectionError) as cm:
broker._commit_puts([b'hmmm'])
mock_merge_items.assert_not_called()
self.assertIn('commits not accepted', str(cm.exception))
with open(broker.pending_file, 'rb') as fd:
self.assertEqual(b':ignored', fd.read())
def test_put_record(self):
db_file = os.path.join(self.testdir, '1.db')
broker = DatabaseBroker(db_file)
broker._initialize = MagicMock()
broker.initialize(Timestamp.now())
# pending file created and record written
broker.make_tuple_for_pickle = lambda x: x.upper()
with patch.object(broker, '_commit_puts') as mock_commit_puts:
broker.put_record('pinky')
mock_commit_puts.assert_not_called()
with open(broker.pending_file, 'rb') as fd:
pending = fd.read()
items = pending.split(b':')
self.assertEqual(['PINKY'],
[pickle.loads(base64.b64decode(i))
for i in items[1:]])
# record appended
with patch.object(broker, '_commit_puts') as mock_commit_puts:
broker.put_record('perky')
mock_commit_puts.assert_not_called()
with open(broker.pending_file, 'rb') as fd:
pending = fd.read()
items = pending.split(b':')
self.assertEqual(['PINKY', 'PERKY'],
[pickle.loads(base64.b64decode(i))
for i in items[1:]])
# pending file above cap
cap = swift.common.db.PENDING_CAP
while os.path.getsize(broker.pending_file) < cap:
with open(broker.pending_file, 'ab') as fd:
fd.write(b'x' * 100000)
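        # once the pending file has grown past PENDING_CAP, put_record is
        # expected to hand the record straight to _commit_puts instead of
        # appending it to the pending file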
with patch.object(broker, '_commit_puts') as mock_commit_puts:
broker.put_record('direct')
mock_commit_puts.assert_called_once_with(['direct'])
# records shouldn't be put to brokers with skip_commits True because
# they cannot be accepted if the pending file is full
broker.skip_commits = True
with open(broker.pending_file, 'wb'):
# empty the pending file
pass
with patch.object(broker, '_commit_puts') as mock_commit_puts:
with self.assertRaises(DatabaseConnectionError) as cm:
broker.put_record('unwelcome')
self.assertIn('commits not accepted', str(cm.exception))
mock_commit_puts.assert_not_called()
with open(broker.pending_file, 'rb') as fd:
pending = fd.read()
self.assertFalse(pending)
class TestTombstoneReclaimer(TestDbBase):
def _make_object(self, broker, obj_name, ts, deleted):
if deleted:
broker.delete_test(obj_name, ts.internal)
else:
broker.put_test(obj_name, ts.internal)
def _count_reclaimable(self, conn, reclaim_age):
return conn.execute(
"SELECT count(*) FROM test "
"WHERE deleted = 1 AND created_at < ?", (reclaim_age,)
).fetchone()[0]
def _get_reclaimable(self, broker, reclaim_age):
with broker.get() as conn:
return self._count_reclaimable(conn, reclaim_age)
def _setup_tombstones(self, reverse_names=True):
broker = ExampleBroker(self.db_path,
account='test_account',
container='test_container')
broker.initialize(Timestamp('1').internal, 0)
now = time.time()
top_of_the_minute = now - (now % 60)
        # namespace if reverse_names:
# a-* has 70 'active' tombstones followed by 70 reclaimable
# b-* has 70 'active' tombstones followed by 70 reclaimable
# else:
# a-* has 70 reclaimable followed by 70 'active' tombstones
# b-* has 70 reclaimable followed by 70 'active' tombstones
for i in range(0, 560, 4):
self._make_object(
broker, 'a_%3d' % (560 - i if reverse_names else i),
Timestamp(top_of_the_minute - (i * 60)), True)
self._make_object(
broker, 'a_%3d' % (559 - i if reverse_names else i + 1),
Timestamp(top_of_the_minute - ((i + 1) * 60)), False)
self._make_object(
broker, 'b_%3d' % (560 - i if reverse_names else i),
Timestamp(top_of_the_minute - ((i + 2) * 60)), True)
self._make_object(
broker, 'b_%3d' % (559 - i if reverse_names else i + 1),
Timestamp(top_of_the_minute - ((i + 3) * 60)), False)
broker._commit_puts()
# divide the set of timestamps exactly in half for reclaim
reclaim_age = top_of_the_minute + 1 - (560 / 2 * 60)
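        # the 280 tombstones are spaced one every two minutes, so a cutoff at
        # the half-way point leaves 140 of them older than reclaim_age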
self.assertEqual(140, self._get_reclaimable(broker, reclaim_age))
tombstones = self._get_reclaimable(broker, top_of_the_minute + 1)
self.assertEqual(280, tombstones)
return broker, top_of_the_minute, reclaim_age
@contextlib.contextmanager
def _mock_broker_get(self, broker, reclaim_age):
# intercept broker.get() calls and capture the current reclaimable
# count before returning a conn
orig_get = broker.get
reclaimable = []
@contextlib.contextmanager
def mock_get():
with orig_get() as conn:
reclaimable.append(self._count_reclaimable(conn, reclaim_age))
yield conn
with patch.object(broker, 'get', mock_get):
yield reclaimable
def test_batched_reclaim_several_small_batches(self):
broker, totm, reclaim_age = self._setup_tombstones()
with self._mock_broker_get(broker, reclaim_age) as reclaimable:
with patch('swift.common.db.RECLAIM_PAGE_SIZE', 50):
reclaimer = TombstoneReclaimer(broker, reclaim_age)
reclaimer.reclaim()
expected_reclaimable = [140, # 0 rows fetched
90, # 50 rows fetched, 50 reclaimed
70, # 100 rows fetched, 20 reclaimed
60, # 150 rows fetched, 10 reclaimed
10, # 200 rows fetched, 50 reclaimed
0, # 250 rows fetched, 10 reclaimed
]
self.assertEqual(expected_reclaimable, reclaimable)
self.assertEqual(0, self._get_reclaimable(broker, reclaim_age))
def test_batched_reclaim_exactly_two_batches(self):
broker, totm, reclaim_age = self._setup_tombstones()
with self._mock_broker_get(broker, reclaim_age) as reclaimable:
with patch('swift.common.db.RECLAIM_PAGE_SIZE', 140):
reclaimer = TombstoneReclaimer(broker, reclaim_age)
reclaimer.reclaim()
expected_reclaimable = [140, # 0 rows fetched
70, # 140 rows fetched, 70 reclaimed
]
self.assertEqual(expected_reclaimable, reclaimable)
self.assertEqual(0, self._get_reclaimable(broker, reclaim_age))
def test_batched_reclaim_one_large_batch(self):
broker, totm, reclaim_age = self._setup_tombstones()
with self._mock_broker_get(broker, reclaim_age) as reclaimable:
with patch('swift.common.db.RECLAIM_PAGE_SIZE', 1000):
reclaimer = TombstoneReclaimer(broker, reclaim_age)
reclaimer.reclaim()
expected_reclaimable = [140] # 0 rows fetched
self.assertEqual(expected_reclaimable, reclaimable)
self.assertEqual(0, self._get_reclaimable(broker, reclaim_age))
def test_reclaim_get_tombstone_count(self):
broker, totm, reclaim_age = self._setup_tombstones(reverse_names=False)
with patch('swift.common.db.RECLAIM_PAGE_SIZE', 122):
reclaimer = TombstoneReclaimer(broker, reclaim_age)
reclaimer.reclaim()
self.assertEqual(0, self._get_reclaimable(broker, reclaim_age))
tombstones = self._get_reclaimable(broker, totm + 1)
self.assertEqual(140, tombstones)
# in this scenario the reclaim phase finds the remaining tombstone
# count (140)
self.assertEqual(140, reclaimer.remaining_tombstones)
self.assertEqual(140, reclaimer.get_tombstone_count())
def test_reclaim_get_tombstone_count_with_leftover(self):
broker, totm, reclaim_age = self._setup_tombstones()
with patch('swift.common.db.RECLAIM_PAGE_SIZE', 122):
reclaimer = TombstoneReclaimer(broker, reclaim_age)
reclaimer.reclaim()
self.assertEqual(0, self._get_reclaimable(broker, reclaim_age))
tombstones = self._get_reclaimable(broker, totm + 1)
self.assertEqual(140, tombstones)
# in this scenario the reclaim phase finds a subset (104) of all
# tombstones (140)
self.assertEqual(104, reclaimer.remaining_tombstones)
# get_tombstone_count finds the rest
actual = reclaimer.get_tombstone_count()
self.assertEqual(140, actual)
def test_get_tombstone_count_with_leftover(self):
# verify that a call to get_tombstone_count() will invoke a reclaim if
# reclaim not already invoked
broker, totm, reclaim_age = self._setup_tombstones()
with patch('swift.common.db.RECLAIM_PAGE_SIZE', 122):
reclaimer = TombstoneReclaimer(broker, reclaim_age)
actual = reclaimer.get_tombstone_count()
self.assertEqual(0, self._get_reclaimable(broker, reclaim_age))
self.assertEqual(140, actual)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/test_db.py |
# Copyright (c) 2010-2022 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import Namespace
from io import BytesIO
import json
import mock
import types
import unittest
import eventlet.wsgi as wsgi
import six
from test.debug_logger import debug_logger
from swift.common import http_protocol, swob
class TestSwiftHttpProtocol(unittest.TestCase):
def _proto_obj(self):
# Make an object we can exercise... note the base class's __init__()
# does a bunch of work, so we just new up an object like eventlet.wsgi
# does.
proto_class = http_protocol.SwiftHttpProtocol
try:
the_obj = types.InstanceType(proto_class)
except AttributeError:
the_obj = proto_class.__new__(proto_class)
# Install some convenience mocks
the_obj.server = Namespace(app=Namespace(logger=mock.Mock()),
url_length_limit=777,
log=mock.Mock())
the_obj.send_error = mock.Mock()
return the_obj
def test_swift_http_protocol_log_request(self):
proto_obj = self._proto_obj()
self.assertEqual(None, proto_obj.log_request('ignored'))
def test_swift_http_protocol_log_message(self):
proto_obj = self._proto_obj()
proto_obj.log_message('a%sc', 'b')
self.assertEqual([mock.call.error('ERROR WSGI: a%sc', 'b')],
proto_obj.server.app.logger.mock_calls)
def test_swift_http_protocol_log_message_no_logger(self):
# If the app somehow had no logger attribute or it was None, don't blow
# up
proto_obj = self._proto_obj()
delattr(proto_obj.server.app, 'logger')
proto_obj.log_message('a%sc', 'b')
self.assertEqual([mock.call.info('ERROR WSGI: a%sc', 'b')],
proto_obj.server.log.mock_calls)
proto_obj.server.log.reset_mock()
proto_obj.server.app.logger = None
proto_obj.log_message('a%sc', 'b')
self.assertEqual([mock.call.info('ERROR WSGI: a%sc', 'b')],
proto_obj.server.log.mock_calls)
def test_swift_http_protocol_parse_request_no_proxy(self):
proto_obj = self._proto_obj()
proto_obj.raw_requestline = b'jimmy jam'
proto_obj.client_address = ('a', '123')
self.assertEqual(False, proto_obj.parse_request())
self.assertEqual([
mock.call(400, "Bad HTTP/0.9 request type ('jimmy')"),
], proto_obj.send_error.mock_calls)
self.assertEqual(('a', '123'), proto_obj.client_address)
def test_bad_request_line(self):
proto_obj = self._proto_obj()
proto_obj.raw_requestline = b'None //'
self.assertEqual(False, proto_obj.parse_request())
class ProtocolTest(unittest.TestCase):
def _run_bytes_through_protocol(self, bytes_from_client, app=None):
rfile = BytesIO(bytes_from_client)
wfile = BytesIO()
# All this fakery is needed to make the WSGI server process one
# connection, possibly with multiple requests, in the main
# greenthread. It doesn't hurt correctness if the function is called
# in a separate greenthread, but it makes using the debugger harder.
class FakeGreenthread(object):
def link(self, a_callable, *args):
a_callable(self, *args)
class FakePool(object):
def spawn(self, a_callable, *args, **kwargs):
a_callable(*args, **kwargs)
return FakeGreenthread()
def spawn_n(self, a_callable, *args, **kwargs):
a_callable(*args, **kwargs)
def waitall(self):
pass
addr = ('127.0.0.1', 8359)
fake_tcp_socket = mock.Mock(
setsockopt=lambda *a: None,
makefile=lambda mode, bufsize: rfile if 'r' in mode else wfile,
getsockname=lambda *a: addr
)
fake_listen_socket = mock.Mock(
accept=mock.MagicMock(
side_effect=[[fake_tcp_socket, addr],
# KeyboardInterrupt breaks the WSGI server out of
# its infinite accept-process-close loop.
KeyboardInterrupt]),
getsockname=lambda *a: addr)
del fake_listen_socket.do_handshake
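        # a bare Mock would also expose do_handshake, which eventlet
        # (presumably) uses to detect TLS listen sockets; drop it so the fake
        # connection is treated as plain HTTP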
# If we let the WSGI server close rfile/wfile then we can't access
# their contents any more.
self.logger = debug_logger('proxy')
with mock.patch.object(wfile, 'close', lambda: None), \
mock.patch.object(rfile, 'close', lambda: None):
wsgi.server(
fake_listen_socket, app or self.app,
protocol=self.protocol_class,
custom_pool=FakePool(),
log=self.logger,
log_output=True,
)
return wfile.getvalue()
class TestSwiftHttpProtocolSomeMore(ProtocolTest):
protocol_class = http_protocol.SwiftHttpProtocol
@staticmethod
def app(env, start_response):
start_response("200 OK", [])
return [swob.wsgi_to_bytes(env['RAW_PATH_INFO'])]
def test_simple(self):
bytes_out = self._run_bytes_through_protocol(
b"GET /someurl HTTP/1.0\r\n"
b"User-Agent: something or other\r\n"
b"\r\n"
)
lines = [l for l in bytes_out.split(b"\r\n") if l]
self.assertEqual(lines[0], b"HTTP/1.1 200 OK") # sanity check
self.assertEqual(lines[-1], b'/someurl')
def test_quoted(self):
bytes_out = self._run_bytes_through_protocol(
b"GET /some%fFpath%D8%AA HTTP/1.0\r\n"
b"User-Agent: something or other\r\n"
b"\r\n"
)
lines = [l for l in bytes_out.split(b"\r\n") if l]
self.assertEqual(lines[0], b"HTTP/1.1 200 OK") # sanity check
self.assertEqual(lines[-1], b'/some%fFpath%D8%AA')
def test_messy(self):
bytes_out = self._run_bytes_through_protocol(
b"GET /oh\xffboy%what$now%E2%80%bd HTTP/1.0\r\n"
b"User-Agent: something or other\r\n"
b"\r\n"
)
lines = [l for l in bytes_out.split(b"\r\n") if l]
self.assertEqual(lines[-1], b'/oh\xffboy%what$now%E2%80%bd')
def test_absolute_target(self):
bytes_out = self._run_bytes_through_protocol((
b"GET https://cluster.domain/bucket/key HTTP/1.0\r\n"
b"\r\n"
))
lines = [l for l in bytes_out.split(b"\r\n") if l]
self.assertEqual(lines[-1], b'/bucket/key')
bytes_out = self._run_bytes_through_protocol((
b"GET http://cluster.domain/v1/acct/cont/obj HTTP/1.0\r\n"
b"\r\n"
))
lines = [l for l in bytes_out.split(b"\r\n") if l]
self.assertEqual(lines[-1], b'/v1/acct/cont/obj')
# clients talking nonsense
bytes_out = self._run_bytes_through_protocol((
b"GET ftp://cluster.domain/bucket/key HTTP/1.0\r\n"
b"\r\n"
))
lines = [l for l in bytes_out.split(b"\r\n") if l]
self.assertEqual(lines[-1], b'ftp://cluster.domain/bucket/key')
bytes_out = self._run_bytes_through_protocol((
b"GET https://cluster.domain HTTP/1.0\r\n"
b"\r\n"
))
lines = [l for l in bytes_out.split(b"\r\n") if l]
self.assertEqual(lines[-1], b'https://cluster.domain')
bytes_out = self._run_bytes_through_protocol((
b"GET http:omg//wtf/bbq HTTP/1.0\r\n"
b"\r\n"
))
lines = [l for l in bytes_out.split(b"\r\n") if l]
self.assertEqual(lines[-1], b'http:omg//wtf/bbq')
def test_bad_request(self):
bytes_out = self._run_bytes_through_protocol((
b"ONLY-METHOD\r\n"
b"Server: example.com\r\n"
b"\r\n"
))
lines = [l for l in bytes_out.split(b"\r\n") if l]
info_lines = self.logger.get_lines_for_level('info')
self.assertEqual(
lines[0], b"HTTP/1.1 400 Bad request syntax ('ONLY-METHOD')")
self.assertIn(b"Bad request syntax or unsupported method.", lines[-1])
self.assertIn(b"X-Trans-Id", lines[6])
self.assertIn(b"X-Openstack-Request-Id", lines[7])
self.assertIn("wsgi starting up", info_lines[0])
self.assertIn("ERROR WSGI: code 400", info_lines[1])
self.assertIn("txn:", info_lines[1])
def test_bad_request_server_logging(self):
with mock.patch('swift.common.http_protocol.generate_trans_id',
return_value='test-trans-id'):
bytes_out = self._run_bytes_through_protocol(
b"ONLY-METHOD\r\n"
b"Server: example.com\r\n"
b"\r\n"
)
lines = [l for l in bytes_out.split(b"\r\n") if l]
self.assertEqual(
lines[0], b"HTTP/1.1 400 Bad request syntax ('ONLY-METHOD')")
self.assertIn(b"Bad request syntax or unsupported method.", lines[-1])
self.assertIn(b"X-Trans-Id: test-trans-id", lines[6])
self.assertIn(b"X-Openstack-Request-Id: test-trans-id", lines[7])
info_lines = self.logger.get_lines_for_level('info')
self.assertEqual(
"ERROR WSGI: code 400, message "
"Bad request syntax ('ONLY-METHOD'), (txn: test-trans-id)",
info_lines[1])
def test_bad_request_app_logging(self):
app_logger = debug_logger()
app = mock.MagicMock()
app.logger = app_logger
with mock.patch('swift.common.http_protocol.generate_trans_id',
return_value='test-trans-id'):
bytes_out = self._run_bytes_through_protocol((
b"ONLY-METHOD\r\n"
b"Server: example.com\r\n"
b"\r\n"
), app=app)
lines = [l for l in bytes_out.split(b"\r\n") if l]
self.assertEqual(
lines[0], b"HTTP/1.1 400 Bad request syntax ('ONLY-METHOD')")
self.assertIn(b"Bad request syntax or unsupported method.", lines[-1])
self.assertIn(b"X-Trans-Id: test-trans-id", lines[6])
self.assertIn(b"X-Openstack-Request-Id: test-trans-id", lines[7])
self.assertEqual(1, len(app_logger.records.get('ERROR', [])))
self.assertIn(
"ERROR WSGI: code 400, message Bad request syntax ('ONLY-METHOD') "
"(txn: test-trans-id)",
app_logger.records.get('ERROR')[0])
# but we can at least assert that the logger txn_id was set
self.assertEqual('test-trans-id', app_logger.txn_id)
def test_leading_slashes(self):
bytes_out = self._run_bytes_through_protocol((
b"GET ///some-leading-slashes HTTP/1.0\r\n"
b"User-Agent: blah blah blah\r\n"
b"\r\n"
))
lines = [l for l in bytes_out.split(b"\r\n") if l]
self.assertEqual(lines[-1], b'///some-leading-slashes')
def test_request_lines(self):
def app(env, start_response):
start_response("200 OK", [])
if six.PY2:
return [json.dumps({
'RAW_PATH_INFO': env['RAW_PATH_INFO'].decode('latin1'),
'QUERY_STRING': (None if 'QUERY_STRING' not in env else
env['QUERY_STRING'].decode('latin1')),
}).encode('ascii')]
return [json.dumps({
'RAW_PATH_INFO': env['RAW_PATH_INFO'],
'QUERY_STRING': env.get('QUERY_STRING'),
}).encode('ascii')]
def do_test(request_line, expected):
bytes_out = self._run_bytes_through_protocol(
request_line + b'\r\n\r\n',
app,
)
print(bytes_out)
resp_body = bytes_out.partition(b'\r\n\r\n')[2]
self.assertEqual(json.loads(resp_body), expected)
do_test(b'GET / HTTP/1.1', {
'RAW_PATH_INFO': u'/',
'QUERY_STRING': None,
})
do_test(b'GET /%FF HTTP/1.1', {
'RAW_PATH_INFO': u'/%FF',
'QUERY_STRING': None,
})
do_test(b'GET /\xff HTTP/1.1', {
'RAW_PATH_INFO': u'/\xff',
'QUERY_STRING': None,
})
do_test(b'PUT /Here%20Is%20A%20SnowMan:\xe2\x98\x83 HTTP/1.0', {
'RAW_PATH_INFO': u'/Here%20Is%20A%20SnowMan:\xe2\x98\x83',
'QUERY_STRING': None,
})
do_test(
b'POST /?and%20it=does+nothing+to+params&'
b'PALMTREE=\xf0%9f\x8c%b4 HTTP/1.1', {
'RAW_PATH_INFO': u'/',
'QUERY_STRING': (u'and%20it=does+nothing+to+params'
u'&PALMTREE=\xf0%9f\x8c%b4'),
}
)
do_test(b'GET // HTTP/1.1', {
'RAW_PATH_INFO': u'//',
'QUERY_STRING': None,
})
do_test(b'GET //bar HTTP/1.1', {
'RAW_PATH_INFO': u'//bar',
'QUERY_STRING': None,
})
do_test(b'GET //////baz HTTP/1.1', {
'RAW_PATH_INFO': u'//////baz',
'QUERY_STRING': None,
})
class TestProxyProtocol(ProtocolTest):
protocol_class = http_protocol.SwiftHttpProxiedProtocol
@staticmethod
def app(env, start_response):
start_response("200 OK", [])
body = '\r\n'.join([
'got addr: %s %s' % (
env.get("REMOTE_ADDR", "<missing>"),
env.get("REMOTE_PORT", "<missing>")),
'on addr: %s %s' % (
env.get("SERVER_ADDR", "<missing>"),
env.get("SERVER_PORT", "<missing>")),
'https is %s (scheme %s)' % (
env.get("HTTPS", "<missing>"),
env.get("wsgi.url_scheme", "<missing>")),
]) + '\r\n'
return [body.encode("utf-8")]
def test_request_with_proxy(self):
bytes_out = self._run_bytes_through_protocol(
b"PROXY TCP4 192.168.0.1 192.168.0.11 56423 4433\r\n"
b"GET /someurl HTTP/1.0\r\n"
b"User-Agent: something or other\r\n"
b"\r\n"
)
lines = [l for l in bytes_out.split(b"\r\n") if l]
self.assertEqual(lines[0], b"HTTP/1.1 200 OK") # sanity check
self.assertEqual(lines[-3:], [
b"got addr: 192.168.0.1 56423",
b"on addr: 192.168.0.11 4433",
b"https is <missing> (scheme http)",
])
def test_request_with_proxy_https(self):
bytes_out = self._run_bytes_through_protocol(
b"PROXY TCP4 192.168.0.1 192.168.0.11 56423 443\r\n"
b"GET /someurl HTTP/1.0\r\n"
b"User-Agent: something or other\r\n"
b"\r\n"
)
lines = [l for l in bytes_out.split(b"\r\n") if l]
self.assertEqual(lines[0], b"HTTP/1.1 200 OK") # sanity check
self.assertEqual(lines[-3:], [
b"got addr: 192.168.0.1 56423",
b"on addr: 192.168.0.11 443",
b"https is on (scheme https)",
])
def test_multiple_requests_with_proxy(self):
bytes_out = self._run_bytes_through_protocol(
b"PROXY TCP4 192.168.0.1 192.168.0.11 56423 443\r\n"
b"GET /someurl HTTP/1.1\r\n"
b"User-Agent: something or other\r\n"
b"\r\n"
b"GET /otherurl HTTP/1.1\r\n"
b"User-Agent: something or other\r\n"
b"Connection: close\r\n"
b"\r\n"
)
lines = bytes_out.split(b"\r\n")
self.assertEqual(lines[0], b"HTTP/1.1 200 OK") # sanity check
# the address in the PROXY line is applied to every request
addr_lines = [l for l in lines if l.startswith(b"got addr")]
self.assertEqual(addr_lines, [b"got addr: 192.168.0.1 56423"] * 2)
addr_lines = [l for l in lines if l.startswith(b"on addr")]
self.assertEqual(addr_lines, [b"on addr: 192.168.0.11 443"] * 2)
addr_lines = [l for l in lines if l.startswith(b"https is")]
self.assertEqual(addr_lines, [b"https is on (scheme https)"] * 2)
def test_missing_proxy_line(self):
with mock.patch('swift.common.http_protocol.generate_trans_id',
return_value='test-bad-req-trans-id'):
bytes_out = self._run_bytes_through_protocol(
# whoops, no PROXY line here
b"GET /someurl HTTP/1.0\r\n"
b"User-Agent: something or other\r\n"
b"\r\n"
)
lines = [l for l in bytes_out.split(b"\r\n") if l]
info_lines = self.logger.get_lines_for_level('info')
self.assertEqual(
lines[0],
b"HTTP/1.1 400 Invalid PROXY line 'GET /someurl HTTP/1.0\\r\\n'")
self.assertIn(b"X-Trans-Id: test-bad-req-trans-id", lines[6])
self.assertIn(b"X-Openstack-Request-Id: test-bad-req-trans-id",
lines[7])
self.assertEqual(
"ERROR WSGI: code 400, message Invalid PROXY line "
"'GET /someurl HTTP/1.0\\r\\n', "
"(txn: test-bad-req-trans-id)",
info_lines[1])
def test_malformed_proxy_lines(self):
for bad_line in [b'PROXY jojo',
b'PROXYjojo a b c d e',
b'PROXY a b c d e', # bad INET protocol and family
]:
bytes_out = self._run_bytes_through_protocol(bad_line)
lines = [l for l in bytes_out.split(b"\r\n") if l]
info_lines = self.logger.get_lines_for_level('info')
self.assertIn(b"400 Invalid PROXY line", lines[0])
self.assertIn(b"X-Trans-Id", lines[6])
self.assertIn(b"X-Openstack-Request-Id", lines[7])
self.assertIn("wsgi starting up", info_lines[0])
self.assertIn("txn:", info_lines[1])
def test_unknown_client_addr(self):
# For "UNKNOWN", the rest of the line before the CRLF may be omitted by
# the sender, and the receiver must ignore anything presented before
# the CRLF is found.
        for unknown_line in [b'PROXY UNKNOWN',  # minimal valid unknown
b'PROXY UNKNOWNblahblah', # also valid
b'PROXY UNKNOWN a b c d']:
bytes_out = self._run_bytes_through_protocol(
unknown_line + (b"\r\n"
b"GET /someurl HTTP/1.0\r\n"
b"User-Agent: something or other\r\n"
b"\r\n")
)
lines = [l for l in bytes_out.split(b"\r\n") if l]
self.assertIn(b"200 OK", lines[0])
def test_address_and_environ(self):
# Make an object we can exercise... note the base class's __init__()
# does a bunch of work, so we just new up an object like eventlet.wsgi
# does.
dummy_env = {'OTHER_ENV_KEY': 'OTHER_ENV_VALUE'}
mock_protocol = mock.Mock(get_environ=lambda s: dummy_env)
patcher = mock.patch(
'swift.common.http_protocol.SwiftHttpProtocol', mock_protocol
)
self.mock_super = patcher.start()
self.addCleanup(patcher.stop)
proto_class = http_protocol.SwiftHttpProxiedProtocol
try:
proxy_obj = types.InstanceType(proto_class)
except AttributeError:
proxy_obj = proto_class.__new__(proto_class)
# Install some convenience mocks
proxy_obj.server = Namespace(app=Namespace(logger=mock.Mock()),
url_length_limit=777,
log=mock.Mock())
proxy_obj.send_error = mock.Mock()
proxy_obj.rfile = BytesIO(
b'PROXY TCP4 111.111.111.111 222.222.222.222 111 222'
)
assert proxy_obj.handle()
self.assertEqual(proxy_obj.client_address, ('111.111.111.111', '111'))
self.assertEqual(proxy_obj.proxy_address, ('222.222.222.222', '222'))
expected_env = {
'SERVER_PORT': '222',
'SERVER_ADDR': '222.222.222.222',
'OTHER_ENV_KEY': 'OTHER_ENV_VALUE'
}
self.assertEqual(proxy_obj.get_environ(), expected_env)
| swift-master | test/unit/common/test_http_protocol.py |
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import bytes_to_wsgi
class TestHeaderKeyDict(unittest.TestCase):
def test_case_insensitive(self):
headers = HeaderKeyDict()
headers['Content-Length'] = 0
headers['CONTENT-LENGTH'] = 10
headers['content-length'] = 20
self.assertEqual(headers['Content-Length'], '20')
self.assertEqual(headers['content-length'], '20')
self.assertEqual(headers['CONTENT-LENGTH'], '20')
def test_unicode(self):
def mkstr(prefix):
return bytes_to_wsgi((prefix + u'\U0001f44d').encode('utf8'))
headers = HeaderKeyDict()
headers[mkstr('x-object-meta-')] = 'ok'
self.assertIn(mkstr('x-object-meta-'), headers)
self.assertIn(mkstr('X-Object-Meta-'), headers)
self.assertIn(mkstr('X-OBJECT-META-'), headers)
keys = list(headers)
self.assertNotIn(mkstr('x-object-meta-'), keys)
self.assertIn(mkstr('X-Object-Meta-'), keys)
self.assertNotIn(mkstr('X-OBJECT-META-'), keys)
def test_setdefault(self):
headers = HeaderKeyDict()
# it gets set
headers.setdefault('x-rubber-ducky', 'the one')
self.assertEqual(headers['X-Rubber-Ducky'], 'the one')
# it has the right return value
ret = headers.setdefault('x-boat', 'dinghy')
self.assertEqual(ret, 'dinghy')
ret = headers.setdefault('x-boat', 'yacht')
self.assertEqual(ret, 'dinghy')
# shouldn't crash
headers.setdefault('x-sir-not-appearing-in-this-request', None)
def test_del_contains(self):
headers = HeaderKeyDict()
headers['Content-Length'] = 0
self.assertIn('Content-Length', headers)
del headers['Content-Length']
self.assertNotIn('Content-Length', headers)
def test_update(self):
headers = HeaderKeyDict()
headers.update({'Content-Length': '0'})
headers.update([('Content-Type', 'text/plain')])
self.assertEqual(headers['Content-Length'], '0')
self.assertEqual(headers['Content-Type'], 'text/plain')
def test_set_none(self):
headers = HeaderKeyDict()
headers['test'] = None
self.assertNotIn('test', headers)
headers['test'] = 'something'
self.assertEqual('something', headers['test']) # sanity check
headers['test'] = None
self.assertNotIn('test', headers)
def test_init_from_dict(self):
headers = HeaderKeyDict({'Content-Length': 20,
'Content-Type': 'text/plain'})
self.assertEqual('20', headers['Content-Length'])
self.assertEqual('text/plain', headers['Content-Type'])
headers = HeaderKeyDict(headers)
self.assertEqual('20', headers['Content-Length'])
self.assertEqual('text/plain', headers['Content-Type'])
def test_set(self):
# mappings = ((<tuple of input vals>, <expected output val>), ...)
mappings = (((1.618, '1.618', b'1.618', u'1.618'), '1.618'),
((20, '20', b'20', u'20'), '20'),
((True, 'True', b'True', u'True'), 'True'),
((False, 'False', b'False', u'False'), 'False'))
for vals, expected in mappings:
for val in vals:
headers = HeaderKeyDict(test=val)
actual = headers['test']
self.assertEqual(expected, actual,
'Expected %s but got %s for val %s' %
(expected, actual, val))
self.assertIsInstance(
actual, str,
'Expected type str but got %s for val %s of type %s' %
(type(actual), val, type(val)))
def test_get(self):
headers = HeaderKeyDict()
headers['content-length'] = 20
self.assertEqual(headers.get('CONTENT-LENGTH'), '20')
self.assertIsNone(headers.get('something-else'))
self.assertEqual(headers.get('something-else', True), True)
def test_keys(self):
headers = HeaderKeyDict()
headers['content-length'] = 20
headers['cOnTent-tYpe'] = 'text/plain'
headers['SomeThing-eLse'] = 'somevalue'
self.assertEqual(
set(headers.keys()),
set(('Content-Length', 'Content-Type', 'Something-Else')))
def test_pop(self):
headers = HeaderKeyDict()
headers['content-length'] = 20
headers['cOntent-tYpe'] = 'text/plain'
self.assertEqual(headers.pop('content-Length'), '20')
self.assertEqual(headers.pop('Content-type'), 'text/plain')
self.assertEqual(headers.pop('Something-Else', 'somevalue'),
'somevalue')
| swift-master | test/unit/common/test_header_key_dict.py |
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Tests for `swift.common.splice`'''
import os
import errno
import ctypes
import logging
import tempfile
import unittest
import contextlib
import re
import mock
import six
from swift.common.splice import splice, tee
LOGGER = logging.getLogger(__name__)
def NamedTemporaryFile():
    '''Wrapper to tempfile.NamedTemporaryFile() disabling buffering.
The wrapper is used to support Python 2 and Python 3 in the same
code base.
'''
if six.PY3:
return tempfile.NamedTemporaryFile(buffering=0)
else:
return tempfile.NamedTemporaryFile(bufsize=0)
def safe_close(fd):
'''Close a file descriptor, ignoring any exceptions'''
try:
os.close(fd)
except Exception:
LOGGER.exception('Error while closing FD')
@contextlib.contextmanager
def pipe():
'''Context-manager providing 2 ends of a pipe, closing them at exit'''
fds = os.pipe()
try:
yield fds
finally:
safe_close(fds[0])
safe_close(fds[1])
class TestSplice(unittest.TestCase):
'''Tests for `splice`'''
def setUp(self):
if not splice.available:
raise unittest.SkipTest('splice not available')
def test_flags(self):
'''Test flag attribute availability'''
self.assertTrue(hasattr(splice, 'SPLICE_F_MOVE'))
self.assertTrue(hasattr(splice, 'SPLICE_F_NONBLOCK'))
self.assertTrue(hasattr(splice, 'SPLICE_F_MORE'))
self.assertTrue(hasattr(splice, 'SPLICE_F_GIFT'))
@mock.patch('swift.common.splice.splice._c_splice', None)
def test_available(self):
'''Test `available` attribute correctness'''
self.assertFalse(splice.available)
def test_splice_pipe_to_pipe(self):
'''Test `splice` from a pipe to a pipe'''
with pipe() as (p1a, p1b):
with pipe() as (p2a, p2b):
os.write(p1b, b'abcdef')
res = splice(p1a, None, p2b, None, 3, 0)
self.assertEqual(res, (3, None, None))
self.assertEqual(os.read(p2a, 3), b'abc')
self.assertEqual(os.read(p1a, 3), b'def')
def test_splice_file_to_pipe(self):
'''Test `splice` from a file to a pipe'''
with NamedTemporaryFile() as fd:
with pipe() as (pa, pb):
fd.write(b'abcdef')
fd.seek(0, os.SEEK_SET)
res = splice(fd, None, pb, None, 3, 0)
self.assertEqual(res, (3, None, None))
# `fd.tell()` isn't updated...
self.assertEqual(os.lseek(fd.fileno(), 0, os.SEEK_CUR), 3)
fd.seek(0, os.SEEK_SET)
res = splice(fd, 3, pb, None, 3, 0)
self.assertEqual(res, (3, 6, None))
self.assertEqual(os.lseek(fd.fileno(), 0, os.SEEK_CUR), 0)
self.assertEqual(os.read(pa, 6), b'abcdef')
def test_splice_pipe_to_file(self):
'''Test `splice` from a pipe to a file'''
with NamedTemporaryFile() as fd:
with pipe() as (pa, pb):
os.write(pb, b'abcdef')
res = splice(pa, None, fd, None, 3, 0)
self.assertEqual(res, (3, None, None))
self.assertEqual(fd.tell(), 3)
fd.seek(0, os.SEEK_SET)
res = splice(pa, None, fd, 3, 3, 0)
self.assertEqual(res, (3, None, 6))
self.assertEqual(fd.tell(), 0)
self.assertEqual(fd.read(6), b'abcdef')
@mock.patch.object(splice, '_c_splice')
def test_fileno(self, mock_splice):
'''Test handling of file-descriptors'''
splice(1, None, 2, None, 3, 0)
self.assertEqual(mock_splice.call_args,
((1, None, 2, None, 3, 0), {}))
mock_splice.reset_mock()
with open('/dev/zero', 'r') as fd:
splice(fd, None, fd, None, 3, 0)
self.assertEqual(mock_splice.call_args,
((fd.fileno(), None, fd.fileno(), None, 3, 0),
{}))
@mock.patch.object(splice, '_c_splice')
def test_flags_list(self, mock_splice):
'''Test handling of flag lists'''
splice(1, None, 2, None, 3,
[splice.SPLICE_F_MOVE, splice.SPLICE_F_NONBLOCK])
flags = splice.SPLICE_F_MOVE | splice.SPLICE_F_NONBLOCK
self.assertEqual(mock_splice.call_args,
((1, None, 2, None, 3, flags), {}))
mock_splice.reset_mock()
splice(1, None, 2, None, 3, [])
self.assertEqual(mock_splice.call_args,
((1, None, 2, None, 3, 0), {}))
def test_errno(self):
'''Test handling of failures'''
# Invoke EBADF by using a read-only FD as fd_out
with open('/dev/null', 'r') as fd:
err = errno.EBADF
msg = r'\[Errno %d\] splice: %s' % (err, os.strerror(err))
try:
splice(fd, None, fd, None, 3, 0)
except IOError as e:
self.assertTrue(re.match(msg, str(e)))
else:
self.fail('Expected IOError was not raised')
self.assertEqual(ctypes.get_errno(), 0)
@mock.patch('swift.common.splice.splice._c_splice', None)
def test_unavailable(self):
'''Test exception when unavailable'''
self.assertRaises(EnvironmentError, splice, 1, None, 2, None, 2, 0)
def test_unavailable_in_libc(self):
'''Test `available` attribute when `libc` has no `splice` support'''
class LibC(object):
'''A fake `libc` object tracking `splice` attribute access'''
def __init__(self):
self.splice_retrieved = False
@property
def splice(self):
self.splice_retrieved = True
raise AttributeError
libc = LibC()
mock_cdll = mock.Mock(return_value=libc)
with mock.patch('ctypes.CDLL', new=mock_cdll):
# Force re-construction of a `Splice` instance
# Something you're not supposed to do in actual code
new_splice = type(splice)()
self.assertFalse(new_splice.available)
libc_name = ctypes.util.find_library('c')
mock_cdll.assert_called_once_with(libc_name, use_errno=True)
self.assertTrue(libc.splice_retrieved)
class TestTee(unittest.TestCase):
'''Tests for `tee`'''
def setUp(self):
if not tee.available:
raise unittest.SkipTest('tee not available')
@mock.patch('swift.common.splice.tee._c_tee', None)
def test_available(self):
'''Test `available` attribute correctness'''
self.assertFalse(tee.available)
def test_tee_pipe_to_pipe(self):
'''Test `tee` from a pipe to a pipe'''
with pipe() as (p1a, p1b):
with pipe() as (p2a, p2b):
os.write(p1b, b'abcdef')
res = tee(p1a, p2b, 3, 0)
self.assertEqual(res, 3)
self.assertEqual(os.read(p2a, 3), b'abc')
self.assertEqual(os.read(p1a, 6), b'abcdef')
@mock.patch.object(tee, '_c_tee')
def test_fileno(self, mock_tee):
'''Test handling of file-descriptors'''
with pipe() as (pa, pb):
tee(pa, pb, 3, 0)
self.assertEqual(mock_tee.call_args, ((pa, pb, 3, 0), {}))
mock_tee.reset_mock()
tee(os.fdopen(pa, 'r'), os.fdopen(pb, 'w'), 3, 0)
self.assertEqual(mock_tee.call_args, ((pa, pb, 3, 0), {}))
@mock.patch.object(tee, '_c_tee')
def test_flags_list(self, mock_tee):
'''Test handling of flag lists'''
tee(1, 2, 3, [splice.SPLICE_F_MOVE | splice.SPLICE_F_NONBLOCK])
flags = splice.SPLICE_F_MOVE | splice.SPLICE_F_NONBLOCK
self.assertEqual(mock_tee.call_args, ((1, 2, 3, flags), {}))
mock_tee.reset_mock()
tee(1, 2, 3, [])
self.assertEqual(mock_tee.call_args, ((1, 2, 3, 0), {}))
def test_errno(self):
'''Test handling of failures'''
# Invoke EBADF by using a read-only FD as fd_out
with open('/dev/null', 'r') as fd:
err = errno.EBADF
msg = r'\[Errno %d\] tee: %s' % (err, os.strerror(err))
try:
tee(fd, fd, 3, 0)
except IOError as e:
self.assertTrue(re.match(msg, str(e)))
else:
self.fail('Expected IOError was not raised')
self.assertEqual(ctypes.get_errno(), 0)
@mock.patch('swift.common.splice.tee._c_tee', None)
def test_unavailable(self):
'''Test exception when unavailable'''
self.assertRaises(EnvironmentError, tee, 1, 2, 2, 0)
def test_unavailable_in_libc(self):
'''Test `available` attribute when `libc` has no `tee` support'''
class LibC(object):
'''A fake `libc` object tracking `tee` attribute access'''
def __init__(self):
self.tee_retrieved = False
@property
def tee(self):
self.tee_retrieved = True
raise AttributeError
libc = LibC()
mock_cdll = mock.Mock(return_value=libc)
with mock.patch('ctypes.CDLL', new=mock_cdll):
# Force re-construction of a `Tee` instance
# Something you're not supposed to do in actual code
new_tee = type(tee)()
self.assertFalse(new_tee.available)
libc_name = ctypes.util.find_library('c')
mock_cdll.assert_called_once_with(libc_name, use_errno=True)
self.assertTrue(libc.tee_retrieved)
| swift-master | test/unit/common/test_splice.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(creiht): Tests
import unittest
from swift.common import exceptions
class TestExceptions(unittest.TestCase):
def test_replication_exception(self):
self.assertEqual(str(exceptions.ReplicationException()), '')
self.assertEqual(str(exceptions.ReplicationException('test')), 'test')
def test_replication_lock_timeout(self):
with exceptions.ReplicationLockTimeout(15, 'test') as exc:
self.assertTrue(isinstance(exc, exceptions.MessageTimeout))
def test_client_exception(self):
strerror = 'test: HTTP://random:888/randompath?foo=1 666 reason: ' \
'device /sdb1 content'
exc = exceptions.ClientException('test', http_scheme='HTTP',
http_host='random',
http_port=888,
http_path='/randompath',
http_query='foo=1',
http_status=666,
http_reason='reason',
http_device='/sdb1',
http_response_content='content')
self.assertEqual(str(exc), strerror)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/test_exceptions.py |
swift-master | test/unit/common/__init__.py |
|
# -*- coding:utf-8 -*-
# Copyright (c) 2010-2021 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from swift.common import recon
class TestCommonRecon(TestCase):
def test_server_type_to_recon_file(self):
# valid server types will come out as <server_type>.recon
for server_type in ('object', 'container', 'account', 'ACCount'):
self.assertEqual(recon.server_type_to_recon_file(server_type),
"%s.recon" % server_type.lower())
# other values will return a ValueError
for bad_server_type in ('obj', '', None, 'other', 'Account '):
self.assertRaises(ValueError,
recon.server_type_to_recon_file, bad_server_type)
| swift-master | test/unit/common/test_recon.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import time
import unittest
from getpass import getuser
import logging
from test.unit import tmpfile, with_tempdir, ConfigAssertMixin
import mock
import signal
from contextlib import contextmanager
import itertools
from collections import defaultdict
import errno
from textwrap import dedent
from swift.common import daemon, utils
from test.debug_logger import debug_logger
class MyDaemon(daemon.Daemon):
WORKERS_HEALTHCHECK_INTERVAL = 0
def __init__(self, conf):
self.conf = conf
self.logger = debug_logger('my-daemon')
MyDaemon.forever_called = False
MyDaemon.once_called = False
def run_forever(self):
MyDaemon.forever_called = True
def run_once(self):
MyDaemon.once_called = True
def run_raise(self):
raise OSError
def run_quit(self):
raise KeyboardInterrupt
class TestDaemon(unittest.TestCase):
def test_create(self):
d = daemon.Daemon({})
self.assertEqual(d.conf, {})
self.assertTrue(isinstance(d.logger, utils.LogAdapter))
def test_stubs(self):
d = daemon.Daemon({})
self.assertRaises(NotImplementedError, d.run_once)
self.assertRaises(NotImplementedError, d.run_forever)
class MyWorkerDaemon(MyDaemon):
def __init__(self, *a, **kw):
super(MyWorkerDaemon, self).__init__(*a, **kw)
MyWorkerDaemon.post_multiprocess_run_called = False
def get_worker_args(self, once=False, **kwargs):
return [kwargs for i in range(int(self.conf.get('workers', 0)))]
def is_healthy(self):
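        # pop scripted health results in order; report healthy once (or if)
        # the side effects run out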
try:
return getattr(self, 'health_side_effects', []).pop(0)
except IndexError:
return True
def post_multiprocess_run(self):
MyWorkerDaemon.post_multiprocess_run_called = True
class TestWorkerDaemon(unittest.TestCase):
def test_stubs(self):
d = daemon.Daemon({})
self.assertRaises(NotImplementedError, d.run_once)
self.assertRaises(NotImplementedError, d.run_forever)
self.assertEqual([], d.get_worker_args())
self.assertEqual(True, d.is_healthy())
def test_my_worker_daemon(self):
d = MyWorkerDaemon({})
self.assertEqual([], d.get_worker_args())
self.assertTrue(d.is_healthy())
d = MyWorkerDaemon({'workers': '3'})
self.assertEqual([{'key': 'val'}] * 3, d.get_worker_args(key='val'))
d.health_side_effects = [True, False]
self.assertTrue(d.is_healthy())
self.assertFalse(d.is_healthy())
self.assertTrue(d.is_healthy())
class TestRunDaemon(unittest.TestCase, ConfigAssertMixin):
def setUp(self):
for patcher in [
mock.patch.object(utils, 'HASH_PATH_PREFIX', b'startcap'),
mock.patch.object(utils, 'HASH_PATH_SUFFIX', b'endcap'),
mock.patch.object(utils, 'drop_privileges', lambda *args: None),
mock.patch.object(utils, 'capture_stdio', lambda *args: None),
]:
patcher.start()
self.addCleanup(patcher.stop)
def test_run(self):
d = MyDaemon({})
self.assertFalse(MyDaemon.forever_called)
self.assertFalse(MyDaemon.once_called)
# test default
d.run()
self.assertEqual(d.forever_called, True)
# test once
d.run(once=True)
self.assertEqual(d.once_called, True)
def test_signal(self):
d = MyDaemon({})
with mock.patch('swift.common.daemon.signal') as mock_signal:
mock_signal.SIGTERM = signal.SIGTERM
daemon.DaemonStrategy(d, d.logger).run()
signal_args, kwargs = mock_signal.signal.call_args
sig, func = signal_args
self.assertEqual(sig, signal.SIGTERM)
with mock.patch('swift.common.daemon.os') as mock_os:
func()
self.assertEqual(mock_os.method_calls, [
mock.call.getpid(),
mock.call.killpg(0, signal.SIGTERM),
# hard exit because bare except handlers can trap SystemExit
mock.call._exit(0)
])
def test_run_daemon(self):
logging.logThreads = 1 # reset to default
sample_conf = "[my-daemon]\nuser = %s\n" % getuser()
with tmpfile(sample_conf) as conf_file, \
mock.patch('swift.common.utils.eventlet') as _utils_evt, \
mock.patch('eventlet.hubs.use_hub') as mock_use_hub, \
mock.patch('eventlet.debug') as _debug_evt:
with mock.patch.dict('os.environ', {'TZ': ''}), \
mock.patch('time.tzset') as mock_tzset:
daemon.run_daemon(MyDaemon, conf_file)
self.assertTrue(MyDaemon.forever_called)
self.assertEqual(os.environ['TZ'], 'UTC+0')
self.assertEqual(mock_tzset.mock_calls, [mock.call()])
self.assertEqual(mock_use_hub.mock_calls,
[mock.call(utils.get_hub())])
daemon.run_daemon(MyDaemon, conf_file, once=True)
_utils_evt.patcher.monkey_patch.assert_called_with(all=False,
socket=True,
select=True,
thread=True)
self.assertEqual(0, logging.logThreads) # fixed in monkey_patch
_debug_evt.hub_exceptions.assert_called_with(False)
self.assertEqual(MyDaemon.once_called, True)
# test raise in daemon code
with mock.patch.object(MyDaemon, 'run_once', MyDaemon.run_raise):
self.assertRaises(OSError, daemon.run_daemon, MyDaemon,
conf_file, once=True)
# test user quit
sio = six.StringIO()
logger = logging.getLogger('server')
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server', log_route='server')
with mock.patch.object(MyDaemon, 'run_forever', MyDaemon.run_quit):
daemon.run_daemon(MyDaemon, conf_file, logger=logger)
self.assertTrue('user quit' in sio.getvalue().lower())
# test missing section
sample_conf = "[default]\nuser = %s\n" % getuser()
with tmpfile(sample_conf) as conf_file:
self.assertRaisesRegex(SystemExit,
'Unable to find my-daemon '
'config section in.*',
daemon.run_daemon, MyDaemon,
conf_file, once=True)
def test_run_daemon_diff_tz(self):
old_tz = os.environ.get('TZ', '')
try:
os.environ['TZ'] = 'EST+05EDT,M4.1.0,M10.5.0'
time.tzset()
self.assertEqual((1970, 1, 1, 0, 0, 0), time.gmtime(0)[:6])
self.assertEqual((1969, 12, 31, 19, 0, 0), time.localtime(0)[:6])
self.assertEqual(18000, time.timezone)
sample_conf = "[my-daemon]\nuser = %s\n" % getuser()
with tmpfile(sample_conf) as conf_file, \
mock.patch('swift.common.utils.eventlet'), \
mock.patch('eventlet.hubs.use_hub'), \
mock.patch('eventlet.debug'):
daemon.run_daemon(MyDaemon, conf_file)
self.assertFalse(MyDaemon.once_called)
self.assertTrue(MyDaemon.forever_called)
self.assertEqual((1970, 1, 1, 0, 0, 0), time.gmtime(0)[:6])
self.assertEqual((1970, 1, 1, 0, 0, 0), time.localtime(0)[:6])
self.assertEqual(0, time.timezone)
finally:
os.environ['TZ'] = old_tz
time.tzset()
@with_tempdir
    def test_run_daemon_from_conf_file(self, tempdir):
conf_path = os.path.join(tempdir, 'test-daemon.conf')
conf_body = """
[DEFAULT]
conn_timeout = 5
client_timeout = 1
[my-daemon]
CONN_timeout = 10
client_timeout = 2
"""
contents = dedent(conf_body)
with open(conf_path, 'w') as f:
f.write(contents)
with mock.patch('swift.common.utils.eventlet'), \
mock.patch('eventlet.hubs.use_hub'), \
mock.patch('eventlet.debug'):
d = daemon.run_daemon(MyDaemon, conf_path)
# my-daemon section takes priority (!?)
self.assertEqual('2', d.conf['client_timeout'])
self.assertEqual('10', d.conf['CONN_timeout'])
self.assertEqual('5', d.conf['conn_timeout'])
@with_tempdir
def test_run_daemon_from_conf_file_with_duplicate_var(self, tempdir):
conf_path = os.path.join(tempdir, 'test-daemon.conf')
conf_body = """
[DEFAULT]
client_timeout = 3
[my-daemon]
CLIENT_TIMEOUT = 2
client_timeout = 1
conn_timeout = 1.1
conn_timeout = 1.2
"""
contents = dedent(conf_body)
with open(conf_path, 'w') as f:
f.write(contents)
with mock.patch('swift.common.utils.eventlet'), \
mock.patch('eventlet.hubs.use_hub'), \
mock.patch('eventlet.debug'):
app_config = lambda: daemon.run_daemon(MyDaemon, tempdir)
            # N.B. option names are case-sensitive, so CLIENT_TIMEOUT and
            # client_timeout are distinct options; only conn_timeout is
            # actually duplicated
self.assertDuplicateOption(app_config, 'conn_timeout', '1.2')
@with_tempdir
    def test_run_daemon_from_conf_dir(self, tempdir):
conf_files = {
'default': """
[DEFAULT]
conn_timeout = 5
client_timeout = 1
""",
'daemon': """
[DEFAULT]
CONN_timeout = 3
CLIENT_TIMEOUT = 4
[my-daemon]
CONN_timeout = 10
client_timeout = 2
""",
}
for filename, conf_body in conf_files.items():
path = os.path.join(tempdir, filename + '.conf')
with open(path, 'wt') as fd:
fd.write(dedent(conf_body))
with mock.patch('swift.common.utils.eventlet'), \
mock.patch('eventlet.hubs.use_hub'), \
mock.patch('eventlet.debug'):
d = daemon.run_daemon(MyDaemon, tempdir)
# my-daemon section takes priority (!?)
self.assertEqual('2', d.conf['client_timeout'])
self.assertEqual('10', d.conf['CONN_timeout'])
self.assertEqual('5', d.conf['conn_timeout'])
@with_tempdir
def test_run_daemon_from_conf_dir_with_duplicate_var(self, tempdir):
conf_files = {
'default': """
[DEFAULT]
client_timeout = 3
""",
'daemon': """
[my-daemon]
client_timeout = 2
CLIENT_TIMEOUT = 4
conn_timeout = 1.1
conn_timeout = 1.2
""",
}
for filename, conf_body in conf_files.items():
path = os.path.join(tempdir, filename + '.conf')
with open(path, 'wt') as fd:
fd.write(dedent(conf_body))
with mock.patch('swift.common.utils.eventlet'), \
mock.patch('eventlet.hubs.use_hub'), \
mock.patch('eventlet.debug'):
app_config = lambda: daemon.run_daemon(MyDaemon, tempdir)
            # N.B. option names are case-sensitive, so CLIENT_TIMEOUT and
            # client_timeout are distinct options; only conn_timeout is
            # actually duplicated
self.assertDuplicateOption(app_config, 'conn_timeout', '1.2')
@contextmanager
def mock_os(self, child_worker_cycles=3):
self.waitpid_calls = defaultdict(int)
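        # simulated os.waitpid(): report a child as still running (pid 0)
        # until it has been polled child_worker_cycles times, then report
        # that child as exited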
def mock_waitpid(p, *args):
self.waitpid_calls[p] += 1
if self.waitpid_calls[p] >= child_worker_cycles:
rv = p
else:
rv = 0
return rv, 0
with mock.patch('swift.common.daemon.os.fork') as mock_fork, \
mock.patch('swift.common.daemon.os.waitpid', mock_waitpid), \
mock.patch('swift.common.daemon.os.kill') as mock_kill:
mock_fork.side_effect = (
'mock-pid-%s' % i for i in itertools.count())
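            # each fork() returns a distinct fake child pid string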
self.mock_fork = mock_fork
self.mock_kill = mock_kill
yield
def test_fork_workers(self):
utils.logging_monkey_patch() # needed to log at notice
d = MyWorkerDaemon({'workers': 3})
strategy = daemon.DaemonStrategy(d, d.logger)
with self.mock_os():
strategy.run(once=True)
self.assertEqual([mock.call()] * 3, self.mock_fork.call_args_list)
self.assertEqual(self.waitpid_calls, {
'mock-pid-0': 3,
'mock-pid-1': 3,
'mock-pid-2': 3,
})
self.assertEqual([], self.mock_kill.call_args_list)
self.assertIn('Finished', d.logger.get_lines_for_level('notice')[-1])
self.assertTrue(MyWorkerDaemon.post_multiprocess_run_called)
def test_forked_worker(self):
d = MyWorkerDaemon({'workers': 3})
strategy = daemon.DaemonStrategy(d, d.logger)
with mock.patch('swift.common.daemon.os.fork') as mock_fork, \
mock.patch('swift.common.daemon.os._exit') as mock_exit:
mock_fork.return_value = 0
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, strategy.run, once=True)
self.assertTrue(d.once_called)
def test_restart_workers(self):
d = MyWorkerDaemon({'workers': 3})
strategy = daemon.DaemonStrategy(d, d.logger)
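        # healthy on the first check, unhealthy on the second, so the
        # strategy kills its original workers and forks replacements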
d.health_side_effects = [True, False]
with self.mock_os():
self.mock_kill.side_effect = lambda *args, **kwargs: setattr(
strategy, 'running', False)
strategy.run()
# six workers forked in total
self.assertEqual([mock.call()] * 6, self.mock_fork.call_args_list)
# since the daemon starts healthy, first pass checks children once
self.assertEqual(self.waitpid_calls, {
'mock-pid-0': 1,
'mock-pid-1': 1,
'mock-pid-2': 1,
})
# second pass is not healthy, original pid's killed
self.assertEqual(set([
('mock-pid-0', signal.SIGTERM),
('mock-pid-1', signal.SIGTERM),
('mock-pid-2', signal.SIGTERM),
]), set(c[0] for c in self.mock_kill.call_args_list[:3]))
# our mock_kill side effect breaks out of running, and cleanup kills
# remaining pids
self.assertEqual(set([
('mock-pid-3', signal.SIGTERM),
('mock-pid-4', signal.SIGTERM),
('mock-pid-5', signal.SIGTERM),
]), set(c[0] for c in self.mock_kill.call_args_list[3:]))
def test_worker_disappears(self):
d = MyWorkerDaemon({'workers': 3})
strategy = daemon.DaemonStrategy(d, d.logger)
strategy.register_worker_start('mock-pid', {'mock_options': True})
self.assertEqual(strategy.unspawned_worker_options, [])
self.assertEqual(strategy.options_by_pid, {
'mock-pid': {'mock_options': True}
})
# still running
with mock.patch('swift.common.daemon.os.waitpid') as mock_waitpid:
mock_waitpid.return_value = (0, 0)
strategy.check_on_all_running_workers()
self.assertEqual(strategy.unspawned_worker_options, [])
self.assertEqual(strategy.options_by_pid, {
'mock-pid': {'mock_options': True}
})
# finished
strategy = daemon.DaemonStrategy(d, d.logger)
strategy.register_worker_start('mock-pid', {'mock_options': True})
with mock.patch('swift.common.daemon.os.waitpid') as mock_waitpid:
mock_waitpid.return_value = ('mock-pid', 0)
strategy.check_on_all_running_workers()
self.assertEqual(strategy.unspawned_worker_options, [
{'mock_options': True}])
self.assertEqual(strategy.options_by_pid, {})
self.assertEqual(d.logger.get_lines_for_level('debug')[-1],
'Worker mock-pid exited')
# disappeared
strategy = daemon.DaemonStrategy(d, d.logger)
strategy.register_worker_start('mock-pid', {'mock_options': True})
with mock.patch('swift.common.daemon.os.waitpid') as mock_waitpid:
mock_waitpid.side_effect = OSError(
errno.ECHILD, os.strerror(errno.ECHILD))
mock_waitpid.return_value = ('mock-pid', 0)
strategy.check_on_all_running_workers()
self.assertEqual(strategy.unspawned_worker_options, [
{'mock_options': True}])
self.assertEqual(strategy.options_by_pid, {})
self.assertEqual(d.logger.get_lines_for_level('notice')[-1],
'Worker mock-pid died')
def test_worker_kills_pids_in_cleanup(self):
d = MyWorkerDaemon({'workers': 2})
strategy = daemon.DaemonStrategy(d, d.logger)
strategy.register_worker_start('mock-pid-1', {'mock_options': True})
strategy.register_worker_start('mock-pid-2', {'mock_options': True})
self.assertEqual(strategy.unspawned_worker_options, [])
self.assertEqual(strategy.options_by_pid, {
'mock-pid-1': {'mock_options': True},
'mock-pid-2': {'mock_options': True},
})
with mock.patch('swift.common.daemon.os.kill') as mock_kill:
strategy.cleanup()
self.assertEqual(strategy.unspawned_worker_options, [
{'mock_options': True}] * 2)
self.assertEqual(strategy.options_by_pid, {})
self.assertEqual(set([
('mock-pid-1', signal.SIGTERM),
('mock-pid-2', signal.SIGTERM),
]), set(c[0] for c in mock_kill.call_args_list))
self.assertEqual(set(d.logger.get_lines_for_level('debug')[-2:]),
set(['Cleaned up worker mock-pid-1',
'Cleaned up worker mock-pid-2']))
def test_worker_disappears_in_cleanup(self):
d = MyWorkerDaemon({'workers': 2})
strategy = daemon.DaemonStrategy(d, d.logger)
strategy.register_worker_start('mock-pid-1', {'mock_options': True})
strategy.register_worker_start('mock-pid-2', {'mock_options': True})
self.assertEqual(strategy.unspawned_worker_options, [])
self.assertEqual(strategy.options_by_pid, {
'mock-pid-1': {'mock_options': True},
'mock-pid-2': {'mock_options': True},
})
with mock.patch('swift.common.daemon.os.kill') as mock_kill:
mock_kill.side_effect = [None, OSError(errno.ECHILD,
os.strerror(errno.ECHILD))]
strategy.cleanup()
self.assertEqual(strategy.unspawned_worker_options, [
{'mock_options': True}] * 2)
self.assertEqual(strategy.options_by_pid, {})
self.assertEqual(set([
('mock-pid-1', signal.SIGTERM),
('mock-pid-2', signal.SIGTERM),
]), set(c[0] for c in mock_kill.call_args_list))
self.assertEqual(set(d.logger.get_lines_for_level('debug')[-2:]),
set(['Cleaned up worker mock-pid-1',
'Cleaned up worker mock-pid-2']))
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/test_daemon.py |
# Copyright (c) 2010-2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
import time
import os
import random
from tempfile import mkdtemp
from shutil import rmtree
from eventlet import Timeout
from swift.common.db_auditor import DatabaseAuditor
from test.debug_logger import debug_logger
class FakeDatabaseBroker(object):
def __init__(self, path, logger):
self.path = path
self.db_file = path
self.file = os.path.basename(path)
def is_deleted(self):
return False
def get_info(self):
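        # audit outcome is keyed off the db filename: fail* raises,
        # true* returns successfully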
if self.file.startswith('fail'):
raise ValueError
if self.file.startswith('true'):
return 'ok'
class FakeDatabaseAuditor(DatabaseAuditor):
server_type = "container"
broker_class = FakeDatabaseBroker
def _audit(self, info, broker):
return None
class TestAuditor(unittest.TestCase):
def setUp(self):
self.testdir = os.path.join(mkdtemp(), 'tmp_test_database_auditor')
self.logger = debug_logger()
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
fnames = ['true1.db', 'true2.db', 'true3.db',
'fail1.db', 'fail2.db']
for fn in fnames:
with open(os.path.join(self.testdir, fn), 'w+') as f:
f.write(' ')
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
def test_run_forever(self):
sleep_times = random.randint(5, 10)
call_times = sleep_times - 1
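        # the auditor sleeps before each pass (including once before the
        # first), so raising on the final sleep leaves call_times passes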
class FakeTime(object):
def __init__(self):
self.times = 0
def sleep(self, sec):
self.times += 1
if self.times < sleep_times:
time.sleep(0.1)
else:
# stop forever by an error
raise ValueError()
def time(self):
return time.time()
conf = {}
test_auditor = FakeDatabaseAuditor(conf, logger=self.logger)
with mock.patch('swift.common.db_auditor.time', FakeTime()):
def fake_audit_location_generator(*args, **kwargs):
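                # return a (path, device, partition) tuple for every db file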
files = os.listdir(self.testdir)
return [(os.path.join(self.testdir, f), '', '') for f in files]
with mock.patch('swift.common.db_auditor.audit_location_generator',
fake_audit_location_generator):
self.assertRaises(ValueError, test_auditor.run_forever)
self.assertEqual(test_auditor.failures, 2 * call_times)
self.assertEqual(test_auditor.passes, 3 * call_times)
# now force timeout path code coverage
with mock.patch('swift.common.db_auditor.DatabaseAuditor.'
'_one_audit_pass', side_effect=Timeout()):
with mock.patch('swift.common.db_auditor.time', FakeTime()):
self.assertRaises(ValueError, test_auditor.run_forever)
def test_run_once(self):
conf = {}
test_auditor = FakeDatabaseAuditor(conf, logger=self.logger)
def fake_audit_location_generator(*args, **kwargs):
files = os.listdir(self.testdir)
return [(os.path.join(self.testdir, f), '', '') for f in files]
with mock.patch('swift.common.db_auditor.audit_location_generator',
fake_audit_location_generator):
test_auditor.run_once()
self.assertEqual(test_auditor.failures, 2)
self.assertEqual(test_auditor.passes, 3)
def test_one_audit_pass(self):
conf = {}
test_auditor = FakeDatabaseAuditor(conf, logger=self.logger)
def fake_audit_location_generator(*args, **kwargs):
files = sorted(os.listdir(self.testdir))
return [(os.path.join(self.testdir, f), '', '') for f in files]
# force code coverage for logging path
with mock.patch('swift.common.db_auditor.audit_location_generator',
fake_audit_location_generator),\
mock.patch('time.time',
return_value=(test_auditor.logging_interval * 2)):
test_auditor._one_audit_pass(0)
self.assertEqual(test_auditor.failures, 1)
self.assertEqual(test_auditor.passes, 3)
def test_database_auditor(self):
conf = {}
test_auditor = FakeDatabaseAuditor(conf, logger=self.logger)
files = os.listdir(self.testdir)
for f in files:
path = os.path.join(self.testdir, f)
test_auditor.audit(path)
self.assertEqual(test_auditor.failures, 2)
self.assertEqual(test_auditor.passes, 3)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/test_db_auditor.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
import tempfile
import time
from six.moves import range
from test.unit import mock_check_drive
from swift.common.swob import Request, HTTPException
from swift.common.http import HTTP_REQUEST_ENTITY_TOO_LARGE, \
HTTP_BAD_REQUEST, HTTP_LENGTH_REQUIRED, HTTP_NOT_IMPLEMENTED
from swift.common import constraints, utils
from swift.common.constraints import MAX_OBJECT_NAME_LENGTH
class TestConstraints(unittest.TestCase):
def test_check_metadata_empty(self):
headers = {}
self.assertIsNone(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object'))
def test_check_metadata_good(self):
headers = {'X-Object-Meta-Name': 'Value'}
self.assertIsNone(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object'))
def test_check_metadata_empty_name(self):
headers = {'X-Object-Meta-': 'Value'}
resp = constraints.check_metadata(Request.blank(
'/', headers=headers), 'object')
self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'Metadata name cannot be empty', resp.body)
def test_check_metadata_non_utf8(self):
# Consciously using native "WSGI strings" in headers
headers = {'X-Account-Meta-Foo': '\xff'}
resp = constraints.check_metadata(Request.blank(
'/', headers=headers), 'account')
self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'Metadata must be valid UTF-8', resp.body)
headers = {'X-Container-Meta-\xff': 'foo'}
resp = constraints.check_metadata(Request.blank(
'/', headers=headers), 'container')
self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'Metadata must be valid UTF-8', resp.body)
# Object's OK; its metadata isn't serialized as JSON
headers = {'X-Object-Meta-Foo': '\xff'}
self.assertIsNone(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object'))
def test_check_metadata_name_length(self):
name = 'a' * constraints.MAX_META_NAME_LENGTH
headers = {'X-Object-Meta-%s' % name: 'v'}
self.assertIsNone(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object'))
name = 'a' * (constraints.MAX_META_NAME_LENGTH + 1)
headers = {'X-Object-Meta-%s' % name: 'v'}
resp = constraints.check_metadata(Request.blank(
'/', headers=headers), 'object')
self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
self.assertIn(
b'x-object-meta-%s' % name.encode('ascii'), resp.body.lower())
self.assertIn(b'Metadata name too long', resp.body)
def test_check_metadata_value_length(self):
value = 'a' * constraints.MAX_META_VALUE_LENGTH
headers = {'X-Object-Meta-Name': value}
self.assertIsNone(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object'))
value = 'a' * (constraints.MAX_META_VALUE_LENGTH + 1)
headers = {'X-Object-Meta-Name': value}
resp = constraints.check_metadata(Request.blank(
'/', headers=headers), 'object')
self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'x-object-meta-name', resp.body.lower())
self.assertIn(
str(constraints.MAX_META_VALUE_LENGTH).encode('ascii'), resp.body)
self.assertIn(b'Metadata value longer than 256', resp.body)
def test_check_metadata_count(self):
headers = {}
for x in range(constraints.MAX_META_COUNT):
headers['X-Object-Meta-%d' % x] = 'v'
self.assertIsNone(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object'))
headers['X-Object-Meta-Too-Many'] = 'v'
resp = constraints.check_metadata(Request.blank(
'/', headers=headers), 'object')
self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'Too many metadata items', resp.body)
def test_check_metadata_size(self):
headers = {}
size = 0
chunk = constraints.MAX_META_NAME_LENGTH + \
constraints.MAX_META_VALUE_LENGTH
x = 0
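        # pack in maximum-sized name/value pairs while staying under the
        # overall metadata size limit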
while size + chunk < constraints.MAX_META_OVERALL_SIZE:
headers['X-Object-Meta-%04d%s' %
(x, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
'v' * constraints.MAX_META_VALUE_LENGTH
size += chunk
x += 1
self.assertIsNone(constraints.check_metadata(Request.blank(
'/', headers=headers), 'object'))
# add two more headers in case adding just one falls exactly on the
# limit (eg one header adds 1024 and the limit is 2048)
headers['X-Object-Meta-%04d%s' %
(x, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
'v' * constraints.MAX_META_VALUE_LENGTH
headers['X-Object-Meta-%04d%s' %
(x + 1, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
'v' * constraints.MAX_META_VALUE_LENGTH
resp = constraints.check_metadata(Request.blank(
'/', headers=headers), 'object')
self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'Total metadata too large', resp.body)
def test_check_object_creation_content_length(self):
headers = {'Content-Length': str(constraints.MAX_FILE_SIZE),
'Content-Type': 'text/plain',
'X-Timestamp': str(time.time())}
self.assertIsNone(constraints.check_object_creation(Request.blank(
'/', headers=headers), 'object_name'))
headers = {'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
'Content-Type': 'text/plain',
'X-Timestamp': str(time.time())}
resp = constraints.check_object_creation(
Request.blank('/', headers=headers), 'object_name')
self.assertEqual(resp.status_int, HTTP_REQUEST_ENTITY_TOO_LARGE)
headers = {'Transfer-Encoding': 'chunked',
'Content-Type': 'text/plain',
'X-Timestamp': str(time.time())}
self.assertIsNone(constraints.check_object_creation(Request.blank(
'/', headers=headers), 'object_name'))
headers = {'Transfer-Encoding': 'gzip',
'Content-Type': 'text/plain',
'X-Timestamp': str(time.time())}
resp = constraints.check_object_creation(Request.blank(
'/', headers=headers), 'object_name')
self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'Invalid Transfer-Encoding header value', resp.body)
headers = {'Content-Type': 'text/plain',
'X-Timestamp': str(time.time())}
resp = constraints.check_object_creation(
Request.blank('/', headers=headers), 'object_name')
self.assertEqual(resp.status_int, HTTP_LENGTH_REQUIRED)
headers = {'Content-Length': 'abc',
'Content-Type': 'text/plain',
'X-Timestamp': str(time.time())}
resp = constraints.check_object_creation(Request.blank(
'/', headers=headers), 'object_name')
self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'Invalid Content-Length header value', resp.body)
headers = {'Transfer-Encoding': 'gzip,chunked',
'Content-Type': 'text/plain',
'X-Timestamp': str(time.time())}
resp = constraints.check_object_creation(Request.blank(
'/', headers=headers), 'object_name')
self.assertEqual(resp.status_int, HTTP_NOT_IMPLEMENTED)
def test_check_object_creation_name_length(self):
headers = {'Transfer-Encoding': 'chunked',
'Content-Type': 'text/plain',
'X-Timestamp': str(time.time())}
name = 'o' * constraints.MAX_OBJECT_NAME_LENGTH
self.assertIsNone(constraints.check_object_creation(Request.blank(
'/', headers=headers), name))
name = 'o' * (MAX_OBJECT_NAME_LENGTH + 1)
resp = constraints.check_object_creation(
Request.blank('/', headers=headers), name)
self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'Object name length of %d longer than %d' %
(MAX_OBJECT_NAME_LENGTH + 1, MAX_OBJECT_NAME_LENGTH),
resp.body)
def test_check_object_creation_content_type(self):
headers = {'Transfer-Encoding': 'chunked',
'Content-Type': 'text/plain',
'X-Timestamp': str(time.time())}
self.assertIsNone(constraints.check_object_creation(Request.blank(
'/', headers=headers), 'object_name'))
headers = {'Transfer-Encoding': 'chunked',
'X-Timestamp': str(time.time())}
resp = constraints.check_object_creation(
Request.blank('/', headers=headers), 'object_name')
self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'No content type', resp.body)
def test_check_object_creation_bad_content_type(self):
headers = {'Transfer-Encoding': 'chunked',
'Content-Type': '\xff\xff',
'X-Timestamp': str(time.time())}
resp = constraints.check_object_creation(
Request.blank('/', headers=headers), 'object_name')
self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'Content-Type', resp.body)
def test_check_object_creation_bad_delete_headers(self):
headers = {'Transfer-Encoding': 'chunked',
'Content-Type': 'text/plain',
'X-Delete-After': 'abc',
'X-Timestamp': str(time.time())}
resp = constraints.check_object_creation(
Request.blank('/', headers=headers), 'object_name')
self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'Non-integer X-Delete-After', resp.body)
t = str(int(time.time() - 60))
headers = {'Transfer-Encoding': 'chunked',
'Content-Type': 'text/plain',
'X-Delete-At': t,
'X-Timestamp': str(time.time())}
resp = constraints.check_object_creation(
Request.blank('/', headers=headers), 'object_name')
self.assertEqual(resp.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'X-Delete-At in past', resp.body)
def test_check_delete_headers(self):
# x-delete-at value should be relative to the request timestamp rather
# than time.time() so separate the two to ensure the checks are robust
ts = utils.Timestamp(time.time() + 100)
# X-Delete-After
headers = {'X-Delete-After': '600',
'X-Timestamp': ts.internal}
req = constraints.check_delete_headers(
Request.blank('/', headers=headers))
self.assertIsInstance(req, Request)
self.assertIn('x-delete-at', req.headers)
self.assertNotIn('x-delete-after', req.headers)
expected_delete_at = str(int(ts) + 600)
self.assertEqual(req.headers.get('X-Delete-At'), expected_delete_at)
headers = {'X-Delete-After': 'abc',
'X-Timestamp': ts.internal}
with self.assertRaises(HTTPException) as cm:
constraints.check_delete_headers(
Request.blank('/', headers=headers))
self.assertEqual(cm.exception.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'Non-integer X-Delete-After', cm.exception.body)
headers = {'X-Delete-After': '60.1',
'X-Timestamp': ts.internal}
with self.assertRaises(HTTPException) as cm:
constraints.check_delete_headers(
Request.blank('/', headers=headers))
self.assertEqual(cm.exception.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'Non-integer X-Delete-After', cm.exception.body)
headers = {'X-Delete-After': '-1',
'X-Timestamp': ts.internal}
with self.assertRaises(HTTPException) as cm:
constraints.check_delete_headers(
Request.blank('/', headers=headers))
self.assertEqual(cm.exception.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'X-Delete-After in past', cm.exception.body)
headers = {'X-Delete-After': '0',
'X-Timestamp': ts.internal}
with self.assertRaises(HTTPException) as cm:
constraints.check_delete_headers(
Request.blank('/', headers=headers))
self.assertEqual(cm.exception.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'X-Delete-After in past', cm.exception.body)
# x-delete-after = 0 disallowed when it results in x-delete-at equal to
# the timestamp
headers = {'X-Delete-After': '0',
'X-Timestamp': utils.Timestamp(int(ts)).internal}
with self.assertRaises(HTTPException) as cm:
constraints.check_delete_headers(
Request.blank('/', headers=headers))
self.assertEqual(cm.exception.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'X-Delete-After in past', cm.exception.body)
# X-Delete-At
delete_at = str(int(ts) + 100)
headers = {'X-Delete-At': delete_at,
'X-Timestamp': ts.internal}
req = constraints.check_delete_headers(
Request.blank('/', headers=headers))
self.assertIsInstance(req, Request)
self.assertIn('x-delete-at', req.headers)
self.assertEqual(req.headers.get('X-Delete-At'), delete_at)
headers = {'X-Delete-At': 'abc',
'X-Timestamp': ts.internal}
with self.assertRaises(HTTPException) as cm:
constraints.check_delete_headers(
Request.blank('/', headers=headers))
self.assertEqual(cm.exception.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'Non-integer X-Delete-At', cm.exception.body)
delete_at = str(int(ts) + 100) + '.1'
headers = {'X-Delete-At': delete_at,
'X-Timestamp': ts.internal}
with self.assertRaises(HTTPException) as cm:
constraints.check_delete_headers(
Request.blank('/', headers=headers))
self.assertEqual(cm.exception.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'Non-integer X-Delete-At', cm.exception.body)
delete_at = str(int(ts) - 1)
headers = {'X-Delete-At': delete_at,
'X-Timestamp': ts.internal}
with self.assertRaises(HTTPException) as cm:
constraints.check_delete_headers(
Request.blank('/', headers=headers))
self.assertEqual(cm.exception.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'X-Delete-At in past', cm.exception.body)
# x-delete-at disallowed when exactly equal to timestamp
delete_at = str(int(ts))
headers = {'X-Delete-At': delete_at,
'X-Timestamp': utils.Timestamp(int(ts)).internal}
with self.assertRaises(HTTPException) as cm:
constraints.check_delete_headers(
Request.blank('/', headers=headers))
self.assertEqual(cm.exception.status_int, HTTP_BAD_REQUEST)
self.assertIn(b'X-Delete-At in past', cm.exception.body)
def test_check_delete_headers_removes_delete_after(self):
ts = utils.Timestamp.now()
headers = {'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Delete-After': '42',
'X-Delete-At': str(int(ts) + 40),
'X-Timestamp': ts.internal}
req = Request.blank('/', headers=headers)
constraints.check_delete_headers(req)
self.assertNotIn('X-Delete-After', req.headers)
self.assertEqual(req.headers['X-Delete-At'], str(int(ts) + 42))
def test_check_delete_headers_sets_delete_at(self):
ts = utils.Timestamp.now()
expected = str(int(ts) + 1000)
# check delete-at is passed through
headers = {'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Delete-At': expected,
'X-Timestamp': ts.internal}
req = Request.blank('/', headers=headers)
constraints.check_delete_headers(req)
self.assertIn('X-Delete-At', req.headers)
self.assertEqual(req.headers['X-Delete-At'], expected)
# check delete-after is converted to delete-at
headers = {'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Delete-After': '42',
'X-Timestamp': ts.internal}
req = Request.blank('/', headers=headers)
constraints.check_delete_headers(req)
self.assertIn('X-Delete-At', req.headers)
expected = str(int(ts) + 42)
self.assertEqual(req.headers['X-Delete-At'], expected)
# check delete-after takes precedence over delete-at
headers = {'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Delete-After': '42',
'X-Delete-At': str(int(ts) + 40),
'X-Timestamp': ts.internal}
req = Request.blank('/', headers=headers)
constraints.check_delete_headers(req)
self.assertIn('X-Delete-At', req.headers)
self.assertEqual(req.headers['X-Delete-At'], expected)
headers = {'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Delete-After': '42',
'X-Delete-At': str(int(ts) + 44),
'X-Timestamp': ts.internal}
req = Request.blank('/', headers=headers)
constraints.check_delete_headers(req)
self.assertIn('X-Delete-At', req.headers)
self.assertEqual(req.headers['X-Delete-At'], expected)
def test_check_drive_invalid_path(self):
root = '/srv/'
with mock_check_drive() as mocks:
drive = 'foo?bar'
with self.assertRaises(ValueError) as exc_mgr:
constraints.check_dir(root, drive)
self.assertEqual(str(exc_mgr.exception),
'%s is not a valid drive name' % drive)
drive = 'foo bar'
with self.assertRaises(ValueError) as exc_mgr:
constraints.check_mount(root, drive)
self.assertEqual(str(exc_mgr.exception),
'%s is not a valid drive name' % drive)
drive = 'foo/bar'
with self.assertRaises(ValueError) as exc_mgr:
constraints.check_drive(root, drive, True)
self.assertEqual(str(exc_mgr.exception),
'%s is not a valid drive name' % drive)
drive = 'foo%bar'
with self.assertRaises(ValueError) as exc_mgr:
constraints.check_drive(root, drive, False)
self.assertEqual(str(exc_mgr.exception),
'%s is not a valid drive name' % drive)
self.assertEqual([], mocks['isdir'].call_args_list)
self.assertEqual([], mocks['ismount'].call_args_list)
def test_check_drive_ismount(self):
root = '/srv'
path = 'sdb1'
with mock_check_drive(ismount=True) as mocks:
with self.assertRaises(ValueError) as exc_mgr:
constraints.check_dir(root, path)
self.assertEqual(str(exc_mgr.exception),
'/srv/sdb1 is not a directory')
with self.assertRaises(ValueError) as exc_mgr:
constraints.check_drive(root, path, False)
self.assertEqual(str(exc_mgr.exception),
'/srv/sdb1 is not a directory')
self.assertEqual([mock.call('/srv/sdb1'), mock.call('/srv/sdb1')],
mocks['isdir'].call_args_list)
self.assertEqual([], mocks['ismount'].call_args_list)
with mock_check_drive(ismount=True) as mocks:
self.assertEqual('/srv/sdb1', constraints.check_mount(root, path))
self.assertEqual('/srv/sdb1', constraints.check_drive(
root, path, True))
self.assertEqual([], mocks['isdir'].call_args_list)
self.assertEqual([mock.call('/srv/sdb1'), mock.call('/srv/sdb1')],
mocks['ismount'].call_args_list)
def test_check_drive_isdir(self):
root = '/srv'
path = 'sdb2'
with mock_check_drive(isdir=True) as mocks:
self.assertEqual('/srv/sdb2', constraints.check_dir(root, path))
self.assertEqual('/srv/sdb2', constraints.check_drive(
root, path, False))
self.assertEqual([mock.call('/srv/sdb2'), mock.call('/srv/sdb2')],
mocks['isdir'].call_args_list)
self.assertEqual([], mocks['ismount'].call_args_list)
with mock_check_drive(isdir=True) as mocks:
with self.assertRaises(ValueError) as exc_mgr:
constraints.check_mount(root, path)
self.assertEqual(str(exc_mgr.exception),
'/srv/sdb2 is not mounted')
with self.assertRaises(ValueError) as exc_mgr:
constraints.check_drive(root, path, True)
self.assertEqual(str(exc_mgr.exception),
'/srv/sdb2 is not mounted')
self.assertEqual([], mocks['isdir'].call_args_list)
self.assertEqual([mock.call('/srv/sdb2'), mock.call('/srv/sdb2')],
mocks['ismount'].call_args_list)
def test_check_float(self):
self.assertFalse(constraints.check_float(''))
self.assertTrue(constraints.check_float('0'))
def test_valid_timestamp(self):
self.assertRaises(HTTPException,
constraints.valid_timestamp,
Request.blank('/'))
self.assertRaises(HTTPException,
constraints.valid_timestamp,
Request.blank('/', headers={
'X-Timestamp': 'asdf'}))
timestamp = utils.Timestamp.now()
req = Request.blank('/', headers={'X-Timestamp': timestamp.internal})
self.assertEqual(timestamp, constraints.valid_timestamp(req))
req = Request.blank('/', headers={'X-Timestamp': timestamp.normal})
self.assertEqual(timestamp, constraints.valid_timestamp(req))
def test_check_utf8(self):
unicode_sample = u'\uc77c\uc601'
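        # utils.RESERVED_STR is a null character, which check_utf8 rejects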
unicode_with_reserved = u'abc%sdef' % utils.RESERVED_STR
# Some false-y values
self.assertFalse(constraints.check_utf8(None))
self.assertFalse(constraints.check_utf8(''))
self.assertFalse(constraints.check_utf8(b''))
self.assertFalse(constraints.check_utf8(u''))
# invalid utf8 bytes
self.assertFalse(constraints.check_utf8(
unicode_sample.encode('utf-8')[::-1]))
# unicode with null
self.assertFalse(constraints.check_utf8(unicode_with_reserved))
# utf8 bytes with null
self.assertFalse(constraints.check_utf8(
unicode_with_reserved.encode('utf8')))
self.assertTrue(constraints.check_utf8('this is ascii and utf-8, too'))
self.assertTrue(constraints.check_utf8(unicode_sample))
self.assertTrue(constraints.check_utf8(unicode_sample.encode('utf8')))
def test_check_utf8_internal(self):
unicode_with_reserved = u'abc%sdef' % utils.RESERVED_STR
# sanity
self.assertFalse(constraints.check_utf8(unicode_with_reserved))
self.assertTrue(constraints.check_utf8('foobar', internal=True))
# internal allows reserved names
self.assertTrue(constraints.check_utf8(unicode_with_reserved,
internal=True))
def test_check_utf8_non_canonical(self):
self.assertFalse(constraints.check_utf8(b'\xed\xa0\xbc\xed\xbc\xb8'))
self.assertTrue(constraints.check_utf8(u'\U0001f338'))
self.assertTrue(constraints.check_utf8(b'\xf0\x9f\x8c\xb8'))
self.assertTrue(constraints.check_utf8(u'\U0001f338'.encode('utf8')))
self.assertFalse(constraints.check_utf8(b'\xed\xa0\xbd\xed\xb9\x88'))
self.assertTrue(constraints.check_utf8(u'\U0001f648'))
def test_check_utf8_lone_surrogates(self):
self.assertFalse(constraints.check_utf8(b'\xed\xa0\xbc'))
self.assertFalse(constraints.check_utf8(u'\ud83c'))
self.assertFalse(constraints.check_utf8(b'\xed\xb9\x88'))
self.assertFalse(constraints.check_utf8(u'\ude48'))
self.assertFalse(constraints.check_utf8(u'\ud800'))
self.assertFalse(constraints.check_utf8(u'\udc00'))
self.assertFalse(constraints.check_utf8(u'\udcff'))
self.assertFalse(constraints.check_utf8(u'\udfff'))
def test_validate_bad_meta(self):
req = Request.blank(
'/v/a/c/o',
headers={'x-object-meta-hello':
'ab' * constraints.MAX_HEADER_SIZE})
self.assertEqual(constraints.check_metadata(req, 'object').status_int,
HTTP_BAD_REQUEST)
resp = constraints.check_metadata(req, 'object')
self.assertIsNotNone(resp)
self.assertIn(b'x-object-meta-hello', resp.body.lower())
def test_validate_constraints(self):
c = constraints
self.assertGreater(c.MAX_META_OVERALL_SIZE, c.MAX_META_NAME_LENGTH)
self.assertGreater(c.MAX_META_OVERALL_SIZE, c.MAX_META_VALUE_LENGTH)
self.assertGreater(c.MAX_HEADER_SIZE, c.MAX_META_NAME_LENGTH)
self.assertGreater(c.MAX_HEADER_SIZE, c.MAX_META_VALUE_LENGTH)
def test_check_account_format(self):
req = Request.blank(
'/v/a/c/o',
headers={'X-Copy-From-Account': 'account/with/slashes'})
self.assertRaises(HTTPException,
constraints.check_account_format,
req, req.headers['X-Copy-From-Account'])
req = Request.blank(
'/v/a/c/o',
headers={'X-Copy-From-Account': ''})
self.assertRaises(HTTPException,
constraints.check_account_format,
req, req.headers['X-Copy-From-Account'])
def test_check_container_format(self):
invalid_versions_locations = (
'container/with/slashes',
'', # empty
)
for versions_location in invalid_versions_locations:
req = Request.blank(
'/v/a/c/o', headers={
'X-Versions-Location': versions_location})
with self.assertRaises(HTTPException) as cm:
constraints.check_container_format(
req, req.headers['X-Versions-Location'])
self.assertTrue(cm.exception.body.startswith(
b'Container name cannot'))
def test_valid_api_version(self):
version = 'v1'
self.assertTrue(constraints.valid_api_version(version))
version = 'v1.0'
self.assertTrue(constraints.valid_api_version(version))
version = 'v2'
self.assertFalse(constraints.valid_api_version(version))
class TestConstraintsConfig(unittest.TestCase):
def test_default_constraints(self):
for key in constraints.DEFAULT_CONSTRAINTS:
# if there is local over-rides in swift.conf we just continue on
if key in constraints.OVERRIDE_CONSTRAINTS:
continue
# module level attrs (that aren't in OVERRIDE) should have the
# same value as the DEFAULT map
module_level_value = getattr(constraints, key.upper())
self.assertEqual(constraints.DEFAULT_CONSTRAINTS[key],
module_level_value)
def test_effective_constraints(self):
for key in constraints.DEFAULT_CONSTRAINTS:
# module level attrs should always mirror the same value as the
# EFFECTIVE map
module_level_value = getattr(constraints, key.upper())
self.assertEqual(constraints.EFFECTIVE_CONSTRAINTS[key],
module_level_value)
# if there are local over-rides in swift.conf those should be
# reflected in the EFFECTIVE, otherwise we expect the DEFAULTs
self.assertEqual(constraints.EFFECTIVE_CONSTRAINTS[key],
constraints.OVERRIDE_CONSTRAINTS.get(
key, constraints.DEFAULT_CONSTRAINTS[key]))
def test_override_constraints(self):
try:
with tempfile.NamedTemporaryFile() as f:
f.write(b'[swift-constraints]\n')
# set everything to 1
for key in constraints.DEFAULT_CONSTRAINTS:
f.write(b'%s = 1\n' % key.encode('ascii'))
f.flush()
with mock.patch.object(utils, 'SWIFT_CONF_FILE', f.name):
constraints.reload_constraints()
for key, default in constraints.DEFAULT_CONSTRAINTS.items():
# module level attrs should all be 1
module_level_value = getattr(constraints, key.upper())
if isinstance(default, int):
self.assertEqual(module_level_value, 1)
elif isinstance(default, str):
self.assertEqual(module_level_value, '1')
else:
self.assertEqual(module_level_value, ['1'])
# all keys should be in OVERRIDE
self.assertEqual(constraints.OVERRIDE_CONSTRAINTS[key],
module_level_value)
# module level attrs should always mirror the same value as
# the EFFECTIVE map
self.assertEqual(constraints.EFFECTIVE_CONSTRAINTS[key],
module_level_value)
finally:
constraints.reload_constraints()
def test_reload_reset(self):
try:
with tempfile.NamedTemporaryFile() as f:
f.write(b'[swift-constraints]\n')
# set everything to 1
for key in constraints.DEFAULT_CONSTRAINTS:
f.write(b'%s = 1\n' % key.encode('ascii'))
f.flush()
with mock.patch.object(utils, 'SWIFT_CONF_FILE', f.name):
constraints.reload_constraints()
self.assertTrue(constraints.SWIFT_CONSTRAINTS_LOADED)
self.assertEqual(sorted(constraints.DEFAULT_CONSTRAINTS.keys()),
sorted(constraints.OVERRIDE_CONSTRAINTS.keys()))
# file is now deleted...
with mock.patch.object(utils, 'SWIFT_CONF_FILE', f.name):
constraints.reload_constraints()
# no constraints have been loaded from non-existent swift.conf
self.assertFalse(constraints.SWIFT_CONSTRAINTS_LOADED)
# no constraints are in OVERRIDE
self.assertEqual([], list(constraints.OVERRIDE_CONSTRAINTS.keys()))
# the EFFECTIVE constraints mirror DEFAULT
self.assertEqual(constraints.EFFECTIVE_CONSTRAINTS,
constraints.DEFAULT_CONSTRAINTS)
finally:
constraints.reload_constraints()
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/test_constraints.py |
# Copyright (c) 2022 NVIDIA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import unittest
from swift.common import digest
from test.debug_logger import debug_logger
class TestDigestUtils(unittest.TestCase):
"""Tests for swift.common.middleware.digest """
def setUp(self):
self.logger = debug_logger('test_digest_utils')
def test_get_hmac(self):
self.assertEqual(
digest.get_hmac('GET', '/path', 1, 'abc'),
'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f')
def test_get_hmac_ip_range(self):
self.assertEqual(
digest.get_hmac('GET', '/path', 1, 'abc', ip_range='127.0.0.1'),
'b30dde4d2b8562b8496466c3b46b2b9ac5054461')
def test_get_hmac_ip_range_non_binary_type(self):
self.assertEqual(
digest.get_hmac(
u'GET', u'/path', 1, u'abc', ip_range=u'127.0.0.1'),
'b30dde4d2b8562b8496466c3b46b2b9ac5054461')
def test_get_hmac_digest(self):
self.assertEqual(
digest.get_hmac(u'GET', u'/path', 1, u'abc', digest='sha256'),
'64c5558394f86b042ce1e929b34907abd9d0a57f3e20cd3f93cffd83de0206a7')
self.assertEqual(
digest.get_hmac(
u'GET', u'/path', 1, u'abc', digest=hashlib.sha256),
'64c5558394f86b042ce1e929b34907abd9d0a57f3e20cd3f93cffd83de0206a7')
self.assertEqual(
digest.get_hmac(u'GET', u'/path', 1, u'abc', digest='sha512'),
'7e95af818aec1b69b53fc2cb6d69456ec64ebda6c17b8fc8b7303b78acc8ca'
'14fc4aed96c1614a8e9d6ff45a6237711d8be294cda679624825d79aa6959b'
'5229')
self.assertEqual(
digest.get_hmac(
u'GET', u'/path', 1, u'abc', digest=hashlib.sha512),
'7e95af818aec1b69b53fc2cb6d69456ec64ebda6c17b8fc8b7303b78acc8ca'
'14fc4aed96c1614a8e9d6ff45a6237711d8be294cda679624825d79aa6959b'
'5229')
def test_extract_digest_and_algorithm(self):
self.assertEqual(
digest.extract_digest_and_algorithm(
'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f'),
('sha1', 'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f'))
self.assertEqual(
digest.extract_digest_and_algorithm(
'sha1:sw3eTSuFYrhJZGbDtGsrmsUFRGE='),
('sha1', 'b30dde4d2b8562b8496466c3b46b2b9ac5054461'))
# also good with '=' stripped
self.assertEqual(
digest.extract_digest_and_algorithm(
'sha1:sw3eTSuFYrhJZGbDtGsrmsUFRGE'),
('sha1', 'b30dde4d2b8562b8496466c3b46b2b9ac5054461'))
self.assertEqual(
digest.extract_digest_and_algorithm(
'b963712313cd4236696fb4c4cf11fc56'
'ff4158e0bcbf1d4424df147783fd1045'),
('sha256', 'b963712313cd4236696fb4c4cf11fc56'
'ff4158e0bcbf1d4424df147783fd1045'))
self.assertEqual(
digest.extract_digest_and_algorithm(
'sha256:uWNxIxPNQjZpb7TEzxH8Vv9BWOC8vx1EJN8Ud4P9EEU='),
('sha256', 'b963712313cd4236696fb4c4cf11fc56'
'ff4158e0bcbf1d4424df147783fd1045'))
self.assertEqual(
digest.extract_digest_and_algorithm(
'sha256:uWNxIxPNQjZpb7TEzxH8Vv9BWOC8vx1EJN8Ud4P9EEU'),
('sha256', 'b963712313cd4236696fb4c4cf11fc56'
'ff4158e0bcbf1d4424df147783fd1045'))
self.assertEqual(
digest.extract_digest_and_algorithm(
'26df3d9d59da574d6f8d359cb2620b1b'
'86737215c38c412dfee0a410acea1ac4'
'285ad0c37229ca74e715c443979da17d'
'3d77a97d2ac79cc5e395b05bfa4bdd30'),
('sha512', '26df3d9d59da574d6f8d359cb2620b1b'
'86737215c38c412dfee0a410acea1ac4'
'285ad0c37229ca74e715c443979da17d'
'3d77a97d2ac79cc5e395b05bfa4bdd30'))
self.assertEqual(
digest.extract_digest_and_algorithm(
'sha512:Jt89nVnaV01vjTWcsmILG4ZzchXDjEEt/uCkEKzq'
'GsQoWtDDcinKdOcVxEOXnaF9PXepfSrHnMXjlbBb+kvdMA=='),
('sha512', '26df3d9d59da574d6f8d359cb2620b1b'
'86737215c38c412dfee0a410acea1ac4'
'285ad0c37229ca74e715c443979da17d'
'3d77a97d2ac79cc5e395b05bfa4bdd30'))
self.assertEqual(
digest.extract_digest_and_algorithm(
'sha512:Jt89nVnaV01vjTWcsmILG4ZzchXDjEEt_uCkEKzq'
'GsQoWtDDcinKdOcVxEOXnaF9PXepfSrHnMXjlbBb-kvdMA'),
('sha512', '26df3d9d59da574d6f8d359cb2620b1b'
'86737215c38c412dfee0a410acea1ac4'
'285ad0c37229ca74e715c443979da17d'
'3d77a97d2ac79cc5e395b05bfa4bdd30'))
with self.assertRaises(ValueError):
digest.extract_digest_and_algorithm('')
with self.assertRaises(ValueError):
digest.extract_digest_and_algorithm(
'exactly_forty_chars_but_not_hex_encoded!')
# Too short (md5)
with self.assertRaises(ValueError):
digest.extract_digest_and_algorithm(
'd41d8cd98f00b204e9800998ecf8427e')
# but you can slip it in via the prefix notation!
self.assertEqual(
digest.extract_digest_and_algorithm(
'md5:1B2M2Y8AsgTpgAmY7PhCfg'),
('md5', 'd41d8cd98f00b204e9800998ecf8427e'))
def test_get_allowed_digests(self):
# start with defaults
allowed, deprecated = digest.get_allowed_digests(
''.split(), self.logger)
self.assertEqual(allowed, {'sha256', 'sha512', 'sha1'})
self.assertEqual(deprecated, {'sha1'})
warning_lines = self.logger.get_lines_for_level('warning')
expected_warning_line = (
'The following digest algorithms are allowed by default but '
'deprecated: sha1. Support will be disabled by default in a '
'future release, and later removed entirely.')
self.assertIn(expected_warning_line, warning_lines)
self.logger.clear()
# now with a subset
allowed, deprecated = digest.get_allowed_digests(
'sha1 sha256'.split(), self.logger)
self.assertEqual(allowed, {'sha256', 'sha1'})
self.assertEqual(deprecated, {'sha1'})
warning_lines = self.logger.get_lines_for_level('warning')
expected_warning_line = (
'The following digest algorithms are configured but '
'deprecated: sha1. Support will be removed in a future release.')
self.assertIn(expected_warning_line, warning_lines)
self.logger.clear()
# Now also with an unsupported digest
allowed, deprecated = digest.get_allowed_digests(
'sha1 sha256 md5'.split(), self.logger)
self.assertEqual(allowed, {'sha256', 'sha1'})
self.assertEqual(deprecated, {'sha1'})
warning_lines = self.logger.get_lines_for_level('warning')
self.assertIn(expected_warning_line, warning_lines)
expected_unsupported_warning_line = (
'The following digest algorithms are configured but not '
'supported: md5')
self.assertIn(expected_unsupported_warning_line, warning_lines)
self.logger.clear()
# Now with no deprecated digests
allowed, deprecated = digest.get_allowed_digests(
'sha256 sha512'.split(), self.logger)
self.assertEqual(allowed, {'sha256', 'sha512'})
self.assertEqual(deprecated, set())
warning_lines = self.logger.get_lines_for_level('warning')
self.assertFalse(warning_lines)
self.logger.clear()
        # configuring only an unsupported digest leaves no valid digests
        # and raises ValueError
with self.assertRaises(ValueError):
digest.get_allowed_digests(['md5'], self.logger)
warning_lines = self.logger.get_lines_for_level('warning')
self.assertIn(expected_unsupported_warning_line, warning_lines)
| swift-master | test/unit/common/test_digest.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import unittest
import zlib
import os
from io import BytesIO
from textwrap import dedent
import six
from six.moves import range, zip_longest
from six.moves.urllib.parse import quote, parse_qsl
from swift.common import exceptions, internal_client, request_helpers, swob, \
utils
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.storage_policy import StoragePolicy
from swift.common.middleware.proxy_logging import ProxyLoggingMiddleware
from swift.common.middleware.gatekeeper import GatekeeperMiddleware
from test.debug_logger import debug_logger
from test.unit import with_tempdir, write_fake_ring, patch_policies
from test.unit.common.middleware.helpers import FakeSwift, LeakTrackingIter
if six.PY3:
from eventlet.green.urllib import request as urllib2
else:
from eventlet.green import urllib2
class FakeConn(object):
def __init__(self, body=None):
if body is None:
body = []
self.body = body
def read(self):
return json.dumps(self.body).encode('ascii')
def info(self):
return {}
def not_sleep(seconds):
pass
def unicode_string(start, length):
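    # build a string of `length` consecutive code points starting at `start`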
return u''.join([six.unichr(x) for x in range(start, start + length)])
def path_parts():
account = unicode_string(1000, 4) + ' ' + unicode_string(1100, 4)
container = unicode_string(2000, 4) + ' ' + unicode_string(2100, 4)
obj = unicode_string(3000, 4) + ' ' + unicode_string(3100, 4)
return account, container, obj
def make_path(account, container=None, obj=None):
path = '/v1/%s' % quote(account.encode('utf-8'))
if container:
path += '/%s' % quote(container.encode('utf-8'))
if obj:
path += '/%s' % quote(obj.encode('utf-8'))
return path
def make_path_info(account, container=None, obj=None):
# FakeSwift keys on PATH_INFO - which is *encoded* but unquoted
path = '/v1/%s' % '/'.join(
p for p in (account, container, obj) if p)
return swob.bytes_to_wsgi(path.encode('utf-8'))
def get_client_app():
app = FakeSwift()
client = internal_client.InternalClient({}, 'test', 1, app=app)
return client, app
class InternalClient(internal_client.InternalClient):
def __init__(self):
pass
class GetMetadataInternalClient(internal_client.InternalClient):
def __init__(self, test, path, metadata_prefix, acceptable_statuses):
self.test = test
self.path = path
self.metadata_prefix = metadata_prefix
self.acceptable_statuses = acceptable_statuses
self.get_metadata_called = 0
self.metadata = 'some_metadata'
def _get_metadata(self, path, metadata_prefix, acceptable_statuses=None,
headers=None, params=None):
self.get_metadata_called += 1
self.test.assertEqual(self.path, path)
self.test.assertEqual(self.metadata_prefix, metadata_prefix)
self.test.assertEqual(self.acceptable_statuses, acceptable_statuses)
return self.metadata
class SetMetadataInternalClient(internal_client.InternalClient):
def __init__(
self, test, path, metadata, metadata_prefix, acceptable_statuses):
self.test = test
self.path = path
self.metadata = metadata
self.metadata_prefix = metadata_prefix
self.acceptable_statuses = acceptable_statuses
self.set_metadata_called = 0
self.metadata = 'some_metadata'
def _set_metadata(
self, path, metadata, metadata_prefix='',
acceptable_statuses=None):
self.set_metadata_called += 1
self.test.assertEqual(self.path, path)
self.test.assertEqual(self.metadata_prefix, metadata_prefix)
self.test.assertEqual(self.metadata, metadata)
self.test.assertEqual(self.acceptable_statuses, acceptable_statuses)
class IterInternalClient(internal_client.InternalClient):
def __init__(
self, test, path, marker, end_marker, prefix, acceptable_statuses,
items):
self.test = test
self.path = path
self.marker = marker
self.end_marker = end_marker
self.prefix = prefix
self.acceptable_statuses = acceptable_statuses
self.items = items
def _iter_items(
self, path, marker='', end_marker='', prefix='',
acceptable_statuses=None):
self.test.assertEqual(self.path, path)
self.test.assertEqual(self.marker, marker)
self.test.assertEqual(self.end_marker, end_marker)
self.test.assertEqual(self.prefix, prefix)
self.test.assertEqual(self.acceptable_statuses, acceptable_statuses)
for item in self.items:
yield item
class TestCompressingfileReader(unittest.TestCase):
def test_init(self):
class CompressObj(object):
def __init__(self, test, *args):
self.test = test
self.args = args
def method(self, *args):
self.test.assertEqual(self.args, args)
return self
try:
compressobj = CompressObj(
self, 9, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
old_compressobj = internal_client.compressobj
internal_client.compressobj = compressobj.method
f = BytesIO(b'')
fobj = internal_client.CompressingFileReader(f)
self.assertEqual(f, fobj._f)
self.assertEqual(compressobj, fobj._compressor)
self.assertEqual(False, fobj.done)
self.assertEqual(True, fobj.first)
self.assertEqual(0, fobj.crc32)
self.assertEqual(0, fobj.total_size)
finally:
internal_client.compressobj = old_compressobj
def test_read(self):
exp_data = b'abcdefghijklmnopqrstuvwxyz'
fobj = internal_client.CompressingFileReader(
BytesIO(exp_data), chunk_size=5)
d = zlib.decompressobj(16 + zlib.MAX_WBITS)
data = b''.join(d.decompress(chunk)
for chunk in iter(fobj.read, b''))
self.assertEqual(exp_data, data)
def test_seek(self):
exp_data = b'abcdefghijklmnopqrstuvwxyz'
fobj = internal_client.CompressingFileReader(
BytesIO(exp_data), chunk_size=5)
# read a couple of chunks only
for _ in range(2):
fobj.read()
# read whole thing after seek and check data
fobj.seek(0)
d = zlib.decompressobj(16 + zlib.MAX_WBITS)
data = b''.join(d.decompress(chunk)
for chunk in iter(fobj.read, b''))
self.assertEqual(exp_data, data)
def test_seek_not_implemented_exception(self):
fobj = internal_client.CompressingFileReader(
BytesIO(b''), chunk_size=5)
self.assertRaises(NotImplementedError, fobj.seek, 10)
self.assertRaises(NotImplementedError, fobj.seek, 0, 10)
class TestInternalClient(unittest.TestCase):
@mock.patch('swift.common.utils.HASH_PATH_SUFFIX', new=b'endcap')
@with_tempdir
def test_load_from_config(self, tempdir):
        conf_path = os.path.join(tempdir, 'internal_client.conf')
conf_body = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
auto_create_account_prefix = -
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
""" % tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
account_ring_path = os.path.join(tempdir, 'account.ring.gz')
write_fake_ring(account_ring_path)
container_ring_path = os.path.join(tempdir, 'container.ring.gz')
write_fake_ring(container_ring_path)
object_ring_path = os.path.join(tempdir, 'object.ring.gz')
write_fake_ring(object_ring_path)
logger = debug_logger('test-ic')
self.assertEqual(logger.get_lines_for_level('warning'), [])
with patch_policies([StoragePolicy(0, 'legacy', True)]):
with mock.patch('swift.proxy.server.get_logger',
lambda *a, **kw: logger):
client = internal_client.InternalClient(conf_path, 'test', 1)
self.assertEqual(logger.get_lines_for_level('warning'), [
'Option auto_create_account_prefix is deprecated. '
'Configure auto_create_account_prefix under the '
'swift-constraints section of swift.conf. This option will '
'be ignored in a future release.'])
self.assertEqual(client.account_ring,
client.app.app.app.account_ring)
self.assertEqual(client.account_ring.serialized_path,
account_ring_path)
self.assertEqual(client.container_ring,
client.app.app.app.container_ring)
self.assertEqual(client.container_ring.serialized_path,
container_ring_path)
object_ring = client.app.app.app.get_object_ring(0)
self.assertEqual(client.get_object_ring(0),
object_ring)
self.assertEqual(object_ring.serialized_path,
object_ring_path)
self.assertEqual(client.auto_create_account_prefix, '-')
@mock.patch('swift.common.utils.HASH_PATH_SUFFIX', new=b'endcap')
@with_tempdir
def test_load_from_config_with_global_conf(self, tempdir):
account_ring_path = os.path.join(tempdir, 'account.ring.gz')
write_fake_ring(account_ring_path)
container_ring_path = os.path.join(tempdir, 'container.ring.gz')
write_fake_ring(container_ring_path)
object_ring_path = os.path.join(tempdir, 'object.ring.gz')
write_fake_ring(object_ring_path)
# global_conf will override the 'x = y' syntax in conf file...
conf_path = os.path.join(tempdir, 'internal_client.conf')
conf_body = """
[DEFAULT]
swift_dir = %s
log_name = conf-file-log-name
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
auto_create_account_prefix = -
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
log_name = catch-errors-log-name
""" % tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
global_conf = {'log_name': 'global-conf-log-name'}
with patch_policies([StoragePolicy(0, 'legacy', True)]):
client = internal_client.InternalClient(
conf_path, 'test', 1, global_conf=global_conf)
self.assertEqual('global-conf-log-name', client.app.logger.server)
# ...but the 'set x = y' syntax in conf file DEFAULT section will
# override global_conf
conf_body = """
[DEFAULT]
swift_dir = %s
set log_name = conf-file-log-name
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
auto_create_account_prefix = -
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
log_name = catch-errors-log-name
""" % tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
global_conf = {'log_name': 'global-conf-log-name'}
with patch_policies([StoragePolicy(0, 'legacy', True)]):
client = internal_client.InternalClient(
conf_path, 'test', 1, global_conf=global_conf)
self.assertEqual('conf-file-log-name', client.app.logger.server)
# ...and the 'set x = y' syntax in conf file app section will override
# DEFAULT section and global_conf
conf_body = """
[DEFAULT]
swift_dir = %s
set log_name = conf-file-log-name
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
auto_create_account_prefix = -
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
set log_name = catch-errors-log-name
""" % tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
global_conf = {'log_name': 'global-conf-log-name'}
with patch_policies([StoragePolicy(0, 'legacy', True)]):
client = internal_client.InternalClient(
conf_path, 'test', 1, global_conf=global_conf)
self.assertEqual('catch-errors-log-name', client.app.logger.server)
def test_init(self):
conf_path = 'some_path'
app = FakeSwift()
user_agent = 'some_user_agent'
request_tries = 123
with mock.patch.object(
internal_client, 'loadapp', return_value=app) as mock_loadapp,\
self.assertRaises(ValueError):
# First try with a bad arg
internal_client.InternalClient(
conf_path, user_agent, request_tries=0)
mock_loadapp.assert_not_called()
# if we load it with the gatekeeper middleware then we also get a
# value error
gate_keeper_app = GatekeeperMiddleware(app, {})
gate_keeper_app._pipeline_final_app = app
gate_keeper_app._pipeline = [gate_keeper_app, app]
with mock.patch.object(
internal_client, 'loadapp', return_value=gate_keeper_app) \
as mock_loadapp, self.assertRaises(ValueError) as err:
internal_client.InternalClient(
conf_path, user_agent, request_tries)
self.assertEqual(
str(err.exception),
('Gatekeeper middleware is not allowed in the InternalClient '
'proxy pipeline'))
with mock.patch.object(
internal_client, 'loadapp', return_value=app) as mock_loadapp:
client = internal_client.InternalClient(
conf_path, user_agent, request_tries)
mock_loadapp.assert_called_once_with(
conf_path, global_conf=None, allow_modify_pipeline=False)
self.assertEqual(app, client.app)
self.assertEqual(user_agent, client.user_agent)
self.assertEqual(request_tries, client.request_tries)
self.assertFalse(client.use_replication_network)
client = internal_client.InternalClient(
conf_path, user_agent, request_tries, app=app,
use_replication_network=True)
self.assertEqual(app, client.app)
self.assertEqual(user_agent, client.user_agent)
self.assertEqual(request_tries, client.request_tries)
self.assertTrue(client.use_replication_network)
global_conf = {'log_name': 'custom'}
client = internal_client.InternalClient(
conf_path, user_agent, request_tries, app=app,
use_replication_network=True, global_conf=global_conf)
self.assertEqual(app, client.app)
self.assertEqual(user_agent, client.user_agent)
self.assertEqual(request_tries, client.request_tries)
self.assertTrue(client.use_replication_network)
def test_init_allow_modify_pipeline(self):
conf_path = 'some_path'
app = FakeSwift()
user_agent = 'some_user_agent'
with mock.patch.object(
internal_client, 'loadapp', return_value=app) as mock_loadapp,\
self.assertRaises(ValueError) as cm:
internal_client.InternalClient(
conf_path, user_agent, 1, allow_modify_pipeline=True)
mock_loadapp.assert_not_called()
self.assertIn("'allow_modify_pipeline' is no longer supported",
str(cm.exception))
with mock.patch.object(
internal_client, 'loadapp', return_value=app) as mock_loadapp:
internal_client.InternalClient(
conf_path, user_agent, 1, allow_modify_pipeline=False)
mock_loadapp.assert_called_once_with(
conf_path, allow_modify_pipeline=False, global_conf=None)
def test_gatekeeper_not_loaded(self):
app = FakeSwift()
pipeline = [app]
class RandomMiddleware(object):
def __init__(self, app):
self.app = app
self._pipeline_final_app = app
self._pipeline = pipeline
self._pipeline.insert(0, self)
# if there is no Gatekeeper middleware then it's false
# just the final app
self.assertFalse(
internal_client.InternalClient.check_gatekeeper_not_loaded(app))
# now with a bunch of middlewares
app_no_gatekeeper = app
for i in range(5):
app_no_gatekeeper = RandomMiddleware(app_no_gatekeeper)
self.assertFalse(
internal_client.InternalClient.check_gatekeeper_not_loaded(
app_no_gatekeeper))
        # But if we add the gatekeeper middleware, it will be found
app_with_gatekeeper = GatekeeperMiddleware(app_no_gatekeeper, {})
pipeline.insert(0, app_with_gatekeeper)
app_with_gatekeeper._pipeline = pipeline
with self.assertRaises(ValueError) as err:
internal_client.InternalClient.check_gatekeeper_not_loaded(
app_with_gatekeeper)
self.assertEqual(str(err.exception),
('Gatekeeper middleware is not allowed in the '
'InternalClient proxy pipeline'))
        # even if we bury it deep in the pipeline
for i in range(5):
app_with_gatekeeper = RandomMiddleware(app_with_gatekeeper)
with self.assertRaises(ValueError) as err:
internal_client.InternalClient.check_gatekeeper_not_loaded(
app_with_gatekeeper)
self.assertEqual(str(err.exception),
('Gatekeeper middleware is not allowed in the '
'InternalClient proxy pipeline'))
def test_make_request_sets_user_agent(self):
class FakeApp(FakeSwift):
def __init__(self, test):
super(FakeApp, self).__init__()
self.test = test
def __call__(self, env, start_response):
self.test.assertNotIn(
'HTTP_X_BACKEND_USE_REPLICATION_NETWORK', env)
self.test.assertEqual(self.backend_user_agent,
"some_agent")
start_response('200 Ok', [('Content-Length', '0')])
return []
client = internal_client.InternalClient(
None, 'some_agent', 1, use_replication_network=False,
app=FakeApp(self))
client.make_request('GET', '/', {}, (200,))
def test_make_request_clears_txn_id_after_calling_app(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, test, logger):
def fake_app(env, start_response):
self.app.logger.txn_id = 'foo'
self.app.logger.debug('Inside of request')
start_response('200 Ok', [('Content-Length', '0')])
return []
self.test = test
self.user_agent = 'some_agent'
self.app = fake_app
self.app.logger = logger
self.request_tries = 1
self.use_replication_network = False
fake_logger = debug_logger()
logger = utils.LogAdapter(fake_logger, 'test-server')
# Make sure there's no transaction ID set -- other tests may have
# polluted the logger
logger.txn_id = None
logger.debug('Before request')
client = InternalClient(self, logger)
client.make_request('GET', '/', {}, (200,))
logger.debug('After request')
self.assertEqual([(args[0], kwargs['extra'].get('txn_id'))
for args, kwargs in fake_logger.log_dict['debug']], [
('Before request', None),
('Inside of request', 'foo'),
('After request', None),
])
def test_make_request_defaults_replication_network_header(self):
class FakeApp(FakeSwift):
def __init__(self, test):
super(FakeApp, self).__init__()
self.test = test
self.expected_header_value = None
def __call__(self, env, start_response):
if self.expected_header_value is None:
self.test.assertNotIn(
'HTTP_X_BACKEND_USE_REPLICATION_NETWORK', env)
else:
hdr_val = env['HTTP_X_BACKEND_USE_REPLICATION_NETWORK']
self.test.assertEqual(self.expected_header_value, hdr_val)
self.test.assertEqual(self.backend_user_agent,
'some_agent')
start_response('200 Ok', [('Content-Length', '0')])
return []
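        # With use_replication_network=False, make_request should not add the
        # X-Backend-Use-Replication-Network header on its own; it only appears
        # when the caller sets it explicitly or the client default is flipped.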
client = internal_client.InternalClient(
None, 'some_agent', 1, use_replication_network=False,
app=FakeApp(self))
client.make_request('GET', '/', {}, (200,))
# Caller can still override
client.app.expected_header_value = 'false'
client.make_request('GET', '/', {
request_helpers.USE_REPLICATION_NETWORK_HEADER: 'false'}, (200,))
client.app.expected_header_value = 'true'
client.make_request('GET', '/', {
request_helpers.USE_REPLICATION_NETWORK_HEADER: 'true'}, (200,))
# Switch default behavior
client.use_replication_network = True
client.make_request('GET', '/', {}, (200,))
client.app.expected_header_value = 'false'
client.make_request('GET', '/', {
request_helpers.USE_REPLICATION_NETWORK_HEADER: 'false'}, (200,))
client.app.expected_header_value = 'on'
client.make_request('GET', '/', {
request_helpers.USE_REPLICATION_NETWORK_HEADER: 'on'}, (200,))
def test_make_request_sets_query_string(self):
captured_envs = []
class FakeApp(FakeSwift):
def __call__(self, env, start_response):
captured_envs.append(env)
start_response('200 Ok', [('Content-Length', '0')])
return []
client = internal_client.InternalClient(
None, 'some_agent', 1, use_replication_network=False,
app=FakeApp())
params = {'param1': 'p1', 'tasty': 'soup'}
client.make_request('GET', '/', {}, (200,), params=params)
actual_params = dict(parse_qsl(captured_envs[0]['QUERY_STRING'],
keep_blank_values=True,
strict_parsing=True))
self.assertEqual(params, actual_params)
self.assertEqual(client.app._pipeline_final_app.backend_user_agent,
'some_agent')
def test_make_request_retries(self):
class FakeApp(FakeSwift):
def __init__(self):
super(FakeApp, self).__init__()
self.request_tries = 4
self.tries = 0
def __call__(self, env, start_response):
self.tries += 1
if self.tries < self.request_tries:
start_response(
'500 Internal Server Error', [('Content-Length', '0')])
else:
start_response('200 Ok', [('Content-Length', '0')])
return []
class InternalClient(internal_client.InternalClient):
def __init__(self, *args, **kwargs):
self.test = kwargs.pop('test')
super(InternalClient, self).__init__(*args, **kwargs)
self.sleep_called = 0
def sleep(self, seconds):
self.sleep_called += 1
self.test.assertEqual(2 ** (self.sleep_called), seconds)
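        # make_request backs off exponentially between attempts (2, 4, 8
        # seconds here), so with request_tries=4 and three failures we expect
        # three sleeps before the final, successful attempt.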
client = InternalClient(
None, 'some_agent', 4, use_replication_network=False,
app=FakeApp(), test=self)
old_sleep = internal_client.sleep
internal_client.sleep = client.sleep
try:
client.make_request('GET', '/', {}, (200,))
finally:
internal_client.sleep = old_sleep
self.assertEqual(3, client.sleep_called)
self.assertEqual(4, client.app.tries)
self.assertEqual(client.app._pipeline_final_app.backend_user_agent,
'some_agent')
def test_base_request_timeout(self):
# verify that base_request passes timeout arg on to urlopen
body = {"some": "content"}
for timeout in (0.0, 42.0, None):
mocked_func = 'swift.common.internal_client.urllib2.urlopen'
with mock.patch(mocked_func) as mock_urlopen:
mock_urlopen.side_effect = [FakeConn(body)]
sc = internal_client.SimpleClient('http://0.0.0.0/')
_, resp_body = sc.base_request('GET', timeout=timeout)
mock_urlopen.assert_called_once_with(mock.ANY, timeout=timeout)
# sanity check
self.assertEqual(body, resp_body)
def test_base_full_listing(self):
body1 = [{'name': 'a'}, {'name': "b"}, {'name': "c"}]
body2 = [{'name': 'd'}]
body3 = []
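        # With full_listing=True, base_request keeps issuing GETs, passing the
        # last returned name as the next marker, until an empty page comes
        # back; the pages are then concatenated into a single listing.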
mocked_func = 'swift.common.internal_client.urllib2.urlopen'
with mock.patch(mocked_func) as mock_urlopen:
mock_urlopen.side_effect = [
FakeConn(body1), FakeConn(body2), FakeConn(body3)]
sc = internal_client.SimpleClient('http://0.0.0.0/')
_, resp_body = sc.base_request('GET', full_listing=True)
self.assertEqual(body1 + body2, resp_body)
self.assertEqual(3, mock_urlopen.call_count)
actual_requests = [call[0][0] for call in mock_urlopen.call_args_list]
if six.PY2:
# The get_selector method was deprecated in favor of a selector
# attribute in py31 and removed in py34
self.assertEqual(
'/?format=json', actual_requests[0].get_selector())
self.assertEqual(
'/?format=json&marker=c', actual_requests[1].get_selector())
self.assertEqual(
'/?format=json&marker=d', actual_requests[2].get_selector())
else:
self.assertEqual('/?format=json', actual_requests[0].selector)
self.assertEqual(
'/?format=json&marker=c', actual_requests[1].selector)
self.assertEqual(
'/?format=json&marker=d', actual_requests[2].selector)
def test_make_request_method_path_headers(self):
class FakeApp(FakeSwift):
def __init__(self):
super(FakeApp, self).__init__()
self.env = None
def __call__(self, env, start_response):
self.env = env
start_response('200 Ok', [('Content-Length', '0')])
return []
client = internal_client.InternalClient(
None, 'some_agent', 3, use_replication_network=False,
app=FakeApp())
for method in 'GET PUT HEAD'.split():
client.make_request(method, '/', {}, (200,))
self.assertEqual(client.app.env['REQUEST_METHOD'], method)
for path in '/one /two/three'.split():
client.make_request('GET', path, {'X-Test': path}, (200,))
self.assertEqual(client.app.env['PATH_INFO'], path)
self.assertEqual(client.app.env['HTTP_X_TEST'], path)
self.assertEqual(client.app._pipeline_final_app.backend_user_agent,
'some_agent')
def test_make_request_error_case(self):
class FakeApp(FakeSwift):
def __call__(self, env, start_response):
body = b'fake error response'
start_response('409 Conflict',
[('Content-Length', str(len(body)))])
return [body]
final_fake_app = FakeApp()
fake_app = ProxyLoggingMiddleware(
final_fake_app, {}, final_fake_app.logger)
fake_app._pipeline_final_app = final_fake_app
client = internal_client.InternalClient(
None, 'some_agent', 3, use_replication_network=False, app=fake_app)
with self.assertRaises(internal_client.UnexpectedResponse), \
mock.patch('swift.common.internal_client.sleep'):
client.make_request('DELETE', '/container', {}, (200,))
# Since we didn't provide an X-Timestamp, retrying gives us a chance to
# succeed (assuming the failure was due to clock skew between servers)
        expected = (' HTTP/1.0 409 ', ' HTTP/1.0 409 ', ' HTTP/1.0 409 ')
logger = client.app._pipeline_final_app.logger
loglines = logger.get_lines_for_level('info')
for expected, logline in zip_longest(expected, loglines):
if not expected:
self.fail('Unexpected extra log line: %r' % logline)
self.assertIn(expected, logline)
self.assertEqual(client.app.app.backend_user_agent, 'some_agent')
def test_make_request_acceptable_status_not_2xx(self):
class FakeApp(FakeSwift):
def __init__(self):
super(FakeApp, self).__init__()
self.closed_paths = []
self.fully_read_paths = []
self.resp_status = None
def __call__(self, env, start_response):
body = b'fake error response'
start_response(self.resp_status,
[('Content-Length', str(len(body)))])
return LeakTrackingIter(body, self.closed_paths.append,
self.fully_read_paths.append,
env['PATH_INFO'])
def do_test(resp_status):
final_fake_app = FakeApp()
fake_app = ProxyLoggingMiddleware(
final_fake_app, {}, final_fake_app.logger)
fake_app._pipeline_final_app = final_fake_app
final_fake_app.resp_status = resp_status
client = internal_client.InternalClient(
None, "some_agent", 3, use_replication_network=False,
app=fake_app)
with self.assertRaises(internal_client.UnexpectedResponse) as ctx,\
mock.patch('swift.common.internal_client.sleep'):
                # It may seem strange to expect only 400 Bad Request here, but
                # the point is to avoid an extra body drain when the response
                # is a correct object body with a 2xx status.
client.make_request('GET', '/cont/obj', {}, (400,))
logger = client.app._pipeline_final_app.logger
loglines = logger.get_lines_for_level('info')
self.assertEqual(client.app.app.backend_user_agent, 'some_agent')
return (client.app._pipeline_final_app.fully_read_paths,
client.app._pipeline_final_app.closed_paths,
ctx.exception.resp, loglines)
fully_read_paths, closed_paths, resp, loglines = do_test('200 OK')
# Since the 200 is considered "properly handled", it won't be retried
self.assertEqual(fully_read_paths, [])
self.assertEqual(closed_paths, [])
# ...and it'll be on us (the caller) to read and close (for example,
# by using swob.Response's body property)
self.assertEqual(resp.body, b'fake error response')
self.assertEqual(fully_read_paths, ['/cont/obj'])
self.assertEqual(closed_paths, ['/cont/obj'])
expected = (' HTTP/1.0 200 ', )
for expected, logline in zip_longest(expected, loglines):
if not expected:
self.fail('Unexpected extra log line: %r' % logline)
self.assertIn(expected, logline)
fully_read_paths, closed_paths, resp, loglines = do_test(
'503 Service Unavailable')
        # But since a 5xx is neither "properly handled" nor likely to include
        # a large body, it will be retried and the responses will already be
        # closed
self.assertEqual(fully_read_paths, ['/cont/obj'] * 3)
self.assertEqual(closed_paths, ['/cont/obj'] * 3)
expected = (' HTTP/1.0 503 ', ' HTTP/1.0 503 ', ' HTTP/1.0 503 ', )
for expected, logline in zip_longest(expected, loglines):
if not expected:
self.fail('Unexpected extra log line: %r' % logline)
self.assertIn(expected, logline)
def test_make_request_codes(self):
class FakeApp(FakeSwift):
def __call__(self, env, start_response):
start_response('200 Ok', [('Content-Length', '0')])
return []
client = internal_client.InternalClient(
None, 'some_agent', 3, use_replication_network=False,
app=FakeApp())
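        # acceptable_statuses may mix exact codes (200) with single-digit
        # status families (2 meaning any 2xx); anything else raises
        # UnexpectedResponse carrying the actual response.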
try:
old_sleep = internal_client.sleep
internal_client.sleep = not_sleep
client.make_request('GET', '/', {}, (200,))
client.make_request('GET', '/', {}, (2,))
client.make_request('GET', '/', {}, (400, 200))
client.make_request('GET', '/', {}, (400, 2))
with self.assertRaises(internal_client.UnexpectedResponse) \
as raised:
client.make_request('GET', '/', {}, (400,))
self.assertEqual(200, raised.exception.resp.status_int)
with self.assertRaises(internal_client.UnexpectedResponse) \
as raised:
client.make_request('GET', '/', {}, (201,))
self.assertEqual(200, raised.exception.resp.status_int)
with self.assertRaises(internal_client.UnexpectedResponse) \
as raised:
client.make_request('GET', '/', {}, (111,))
self.assertTrue(str(raised.exception).startswith(
'Unexpected response'))
self.assertEqual(client.app._pipeline_final_app.backend_user_agent,
'some_agent')
finally:
internal_client.sleep = old_sleep
def test_make_request_calls_fobj_seek_each_try(self):
class FileObject(object):
def __init__(self, test):
self.test = test
self.seek_called = 0
def seek(self, offset, whence=0):
self.seek_called += 1
self.test.assertEqual(0, offset)
self.test.assertEqual(0, whence)
class FakeApp(FakeSwift):
def __init__(self, status):
super(FakeApp, self).__init__()
self.status = status
def __call__(self, env, start_response):
start_response(self.status, [('Content-Length', '0')])
self._calls.append('')
return []
def do_test(status, expected_calls):
fobj = FileObject(self)
client = internal_client.InternalClient(
None, 'some_agent', 3, use_replication_network=False,
app=FakeApp(status))
with mock.patch.object(internal_client, 'sleep', not_sleep):
with self.assertRaises(Exception) as exc_mgr:
client.make_request('PUT', '/', {}, (2,), fobj)
self.assertEqual(int(status[:3]),
exc_mgr.exception.resp.status_int)
self.assertEqual(client.app.call_count, fobj.seek_called)
self.assertEqual(client.app.call_count, expected_calls)
self.assertEqual(client.app._pipeline_final_app.backend_user_agent,
'some_agent')
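        # A 404 is not retried (one attempt), while a 503 is retried up to
        # request_tries (3) attempts; the body file must be rewound via
        # seek(0) before every attempt.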
do_test('404 Not Found', 1)
do_test('503 Service Unavailable', 3)
def test_make_request_request_exception(self):
class FakeApp(FakeSwift):
def __call__(self, env, start_response):
raise Exception()
client = internal_client.InternalClient(
None, 'some_agent', 3, app=FakeApp())
self.assertEqual(client.app._pipeline_final_app.backend_user_agent,
'some_agent')
try:
old_sleep = internal_client.sleep
internal_client.sleep = not_sleep
self.assertRaises(
Exception, client.make_request, 'GET', '/', {}, (2,))
finally:
internal_client.sleep = old_sleep
def test_get_metadata(self):
class Response(object):
def __init__(self, headers):
self.headers = headers
self.status_int = 200
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path, resp_headers):
self.test = test
self.path = path
self.resp_headers = resp_headers
self.make_request_called = 0
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None, params=None):
self.make_request_called += 1
self.test.assertEqual('HEAD', method)
self.test.assertEqual(self.path, path)
self.test.assertEqual((2,), acceptable_statuses)
self.test.assertIsNone(body_file)
return Response(self.resp_headers)
path = 'some_path'
metadata_prefix = 'some_key-'
resp_headers = {
'%sone' % (metadata_prefix): '1',
'%sTwo' % (metadata_prefix): '2',
'%sThree' % (metadata_prefix): '3',
'some_header-four': '4',
'Some_header-five': '5',
}
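        # _get_metadata should keep only the headers carrying the requested
        # prefix, strip the prefix, and lower-case the remaining key.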
exp_metadata = {
'one': '1',
'two': '2',
'three': '3',
}
client = InternalClient(self, path, resp_headers)
metadata = client._get_metadata(path, metadata_prefix)
self.assertEqual(exp_metadata, metadata)
self.assertEqual(1, client.make_request_called)
def test_get_metadata_invalid_status(self):
class FakeApp(FakeSwift):
def __call__(self, environ, start_response):
start_response('404 Not Found', [('x-foo', 'bar')])
return [b'nope']
client = internal_client.InternalClient(
None, 'test', 1, use_replication_network=False, app=FakeApp())
self.assertRaises(internal_client.UnexpectedResponse,
client._get_metadata, 'path')
metadata = client._get_metadata('path', metadata_prefix='x-',
acceptable_statuses=(4,))
self.assertEqual(metadata, {'foo': 'bar'})
self.assertEqual(client.app._pipeline_final_app.backend_user_agent,
'test')
def test_make_path(self):
account, container, obj = path_parts()
path = make_path(account, container, obj)
c = InternalClient()
self.assertEqual(path, c.make_path(account, container, obj))
def test_make_path_exception(self):
c = InternalClient()
self.assertRaises(ValueError, c.make_path, 'account', None, 'obj')
def test_iter_items(self):
class Response(object):
def __init__(self, status_int, body):
self.status_int = status_int
self.body = body
class InternalClient(internal_client.InternalClient):
def __init__(self, test, responses):
self.test = test
self.responses = responses
self.make_request_called = 0
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.make_request_called += 1
return self.responses.pop(0)
exp_items = []
responses = [Response(200, json.dumps([]).encode('ascii')), ]
items = []
client = InternalClient(self, responses)
for item in client._iter_items('/'):
items.append(item)
self.assertEqual(exp_items, items)
exp_items = []
responses = []
for i in range(3):
data = [
{'name': 'item%02d' % (2 * i)},
{'name': 'item%02d' % (2 * i + 1)}]
responses.append(Response(200, json.dumps(data).encode('ascii')))
exp_items.extend(data)
responses.append(Response(204, ''))
items = []
client = InternalClient(self, responses)
for item in client._iter_items('/'):
items.append(item)
self.assertEqual(exp_items, items)
def test_iter_items_with_markers(self):
class Response(object):
def __init__(self, status_int, body):
self.status_int = status_int
self.body = body.encode('ascii')
class InternalClient(internal_client.InternalClient):
def __init__(self, test, paths, responses):
self.test = test
self.paths = paths
self.responses = responses
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
exp_path = self.paths.pop(0)
self.test.assertEqual(exp_path, path)
return self.responses.pop(0)
paths = [
'/?format=json&marker=start&end_marker=end&prefix=',
'/?format=json&marker=one%C3%A9&end_marker=end&prefix=',
'/?format=json&marker=two&end_marker=end&prefix=',
]
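        # _iter_items pages through the listing by quoting the last item's
        # (possibly non-ASCII) name into the next marker, stopping once a 204
        # is returned.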
responses = [
Response(200, json.dumps([{
'name': b'one\xc3\xa9'.decode('utf8')}, ])),
Response(200, json.dumps([{'name': 'two'}, ])),
Response(204, ''),
]
items = []
client = InternalClient(self, paths, responses)
for item in client._iter_items('/', marker='start', end_marker='end'):
items.append(item['name'].encode('utf8'))
self.assertEqual(b'one\xc3\xa9 two'.split(), items)
def test_iter_items_with_markers_and_prefix(self):
class Response(object):
def __init__(self, status_int, body):
self.status_int = status_int
self.body = body.encode('ascii')
class InternalClient(internal_client.InternalClient):
def __init__(self, test, paths, responses):
self.test = test
self.paths = paths
self.responses = responses
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
exp_path = self.paths.pop(0)
self.test.assertEqual(exp_path, path)
return self.responses.pop(0)
paths = [
'/?format=json&marker=prefixed_start&end_marker=prefixed_end'
'&prefix=prefixed_',
'/?format=json&marker=prefixed_one%C3%A9&end_marker=prefixed_end'
'&prefix=prefixed_',
'/?format=json&marker=prefixed_two&end_marker=prefixed_end'
'&prefix=prefixed_',
]
responses = [
Response(200, json.dumps([{
'name': b'prefixed_one\xc3\xa9'.decode('utf8')}, ])),
Response(200, json.dumps([{'name': 'prefixed_two'}, ])),
Response(204, ''),
]
items = []
client = InternalClient(self, paths, responses)
for item in client._iter_items('/', marker='prefixed_start',
end_marker='prefixed_end',
prefix='prefixed_'):
items.append(item['name'].encode('utf8'))
self.assertEqual(b'prefixed_one\xc3\xa9 prefixed_two'.split(), items)
def test_iter_item_read_response_if_status_is_acceptable(self):
class Response(object):
def __init__(self, status_int, body, app_iter):
self.status_int = status_int
self.body = body
self.app_iter = app_iter
class InternalClient(internal_client.InternalClient):
def __init__(self, test, responses):
self.test = test
self.responses = responses
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
resp = self.responses.pop(0)
if resp.status_int in acceptable_statuses or \
resp.status_int // 100 in acceptable_statuses:
return resp
if resp:
raise internal_client.UnexpectedResponse(
'Unexpected response: %s' % resp.status_int, resp)
num_list = []
def generate_resp_body():
for i in range(1, 5):
yield str(i).encode('ascii')
num_list.append(i)
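        # num_list records how far the lazy response body was consumed: an
        # acceptable 204 should not be drained, an acceptable 404 should be
        # drained fully, and an unacceptable 300 should raise immediately.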
exp_items = []
responses = [Response(204, json.dumps([]).encode('ascii'),
generate_resp_body())]
items = []
client = InternalClient(self, responses)
for item in client._iter_items('/'):
items.append(item)
self.assertEqual(exp_items, items)
self.assertEqual(len(num_list), 0)
responses = [Response(300, json.dumps([]).encode('ascii'),
generate_resp_body())]
client = InternalClient(self, responses)
self.assertRaises(internal_client.UnexpectedResponse,
next, client._iter_items('/'))
exp_items = []
responses = [Response(404, json.dumps([]).encode('ascii'),
generate_resp_body())]
items = []
client = InternalClient(self, responses)
for item in client._iter_items('/'):
items.append(item)
self.assertEqual(exp_items, items)
self.assertEqual(len(num_list), 4)
def test_set_metadata(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path, exp_headers):
self.test = test
self.path = path
self.exp_headers = exp_headers
self.make_request_called = 0
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.make_request_called += 1
self.test.assertEqual('POST', method)
self.test.assertEqual(self.path, path)
self.test.assertEqual(self.exp_headers, headers)
self.test.assertEqual((2,), acceptable_statuses)
self.test.assertIsNone(body_file)
path = 'some_path'
metadata_prefix = 'some_key-'
metadata = {
'%sone' % (metadata_prefix): '1',
'%stwo' % (metadata_prefix): '2',
'three': '3',
}
exp_headers = {
'%sone' % (metadata_prefix): '1',
'%stwo' % (metadata_prefix): '2',
'%sthree' % (metadata_prefix): '3',
}
client = InternalClient(self, path, exp_headers)
client._set_metadata(path, metadata, metadata_prefix)
self.assertEqual(1, client.make_request_called)
def test_iter_containers(self):
account, container, obj = path_parts()
path = make_path(account)
items = '0 1 2'.split()
marker = 'some_marker'
end_marker = 'some_end_marker'
prefix = 'some_prefix'
acceptable_statuses = 'some_status_list'
client = IterInternalClient(
self, path, marker, end_marker, prefix, acceptable_statuses, items)
ret_items = []
for container in client.iter_containers(
account, marker, end_marker, prefix,
acceptable_statuses=acceptable_statuses):
ret_items.append(container)
self.assertEqual(items, ret_items)
def test_create_account(self):
account, container, obj = path_parts()
path = make_path_info(account)
client, app = get_client_app()
app.register('PUT', path, swob.HTTPCreated, {})
client.create_account(account)
self.assertEqual([('PUT', path, {
'X-Backend-Allow-Reserved-Names': 'true',
'Host': 'localhost:80',
'User-Agent': 'test'
})], app._calls)
self.assertEqual(app.backend_user_agent, 'test')
self.assertEqual({}, app.unread_requests)
self.assertEqual({}, app.unclosed_requests)
def test_delete_account(self):
account, container, obj = path_parts()
path = make_path_info(account)
client, app = get_client_app()
app.register('DELETE', path, swob.HTTPNoContent, {})
client.delete_account(account)
self.assertEqual(1, len(app._calls))
self.assertEqual({}, app.unread_requests)
self.assertEqual({}, app.unclosed_requests)
self.assertEqual(app.backend_user_agent, 'test')
def test_get_account_info(self):
class Response(object):
def __init__(self, containers, objects):
self.headers = {
'x-account-container-count': containers,
'x-account-object-count': objects,
}
self.status_int = 200
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path, resp):
self.test = test
self.path = path
self.resp = resp
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.test.assertEqual('HEAD', method)
self.test.assertEqual(self.path, path)
self.test.assertEqual({}, headers)
self.test.assertEqual((2, 404), acceptable_statuses)
self.test.assertIsNone(body_file)
return self.resp
account, container, obj = path_parts()
path = make_path(account)
containers, objects = 10, 100
client = InternalClient(self, path, Response(containers, objects))
info = client.get_account_info(account)
self.assertEqual((containers, objects), info)
def test_get_account_info_404(self):
class Response(object):
def __init__(self):
self.headers = {
'x-account-container-count': 10,
'x-account-object-count': 100,
}
self.status_int = 404
class InternalClient(internal_client.InternalClient):
def __init__(self):
pass
def make_path(self, *a, **kw):
return 'some_path'
def make_request(self, *a, **kw):
return Response()
client = InternalClient()
info = client.get_account_info('some_account')
self.assertEqual((0, 0), info)
def test_get_account_metadata(self):
account, container, obj = path_parts()
path = make_path(account)
acceptable_statuses = 'some_status_list'
metadata_prefix = 'some_metadata_prefix'
client = GetMetadataInternalClient(
self, path, metadata_prefix, acceptable_statuses)
metadata = client.get_account_metadata(
account, metadata_prefix, acceptable_statuses)
self.assertEqual(client.metadata, metadata)
self.assertEqual(1, client.get_metadata_called)
    def test_get_metadata_with_acceptable_status(self):
account, container, obj = path_parts()
path = make_path_info(account)
client, app = get_client_app()
resp_headers = {'some-important-header': 'some value'}
app.register('GET', path, swob.HTTPOk, resp_headers)
metadata = client.get_account_metadata(
account, acceptable_statuses=(2, 4))
self.assertEqual(metadata['some-important-header'],
'some value')
app.register('GET', path, swob.HTTPNotFound, resp_headers)
metadata = client.get_account_metadata(
account, acceptable_statuses=(2, 4))
self.assertEqual(metadata['some-important-header'],
'some value')
app.register('GET', path, swob.HTTPServerError, resp_headers)
self.assertRaises(internal_client.UnexpectedResponse,
client.get_account_metadata, account,
acceptable_statuses=(2, 4))
def test_set_account_metadata(self):
account, container, obj = path_parts()
path = make_path_info(account)
client, app = get_client_app()
app.register('POST', path, swob.HTTPAccepted, {})
client.set_account_metadata(account, {'Color': 'Blue'},
metadata_prefix='X-Account-Meta-')
self.assertEqual([('POST', path, {
'X-Backend-Allow-Reserved-Names': 'true',
'Host': 'localhost:80',
'X-Account-Meta-Color': 'Blue',
'User-Agent': 'test'
})], app._calls)
self.assertEqual({}, app.unread_requests)
self.assertEqual({}, app.unclosed_requests)
self.assertEqual(app.backend_user_agent, 'test')
def test_set_account_metadata_plumbing(self):
account, container, obj = path_parts()
path = make_path(account)
metadata = 'some_metadata'
metadata_prefix = 'some_metadata_prefix'
acceptable_statuses = 'some_status_list'
client = SetMetadataInternalClient(
self, path, metadata, metadata_prefix, acceptable_statuses)
client.set_account_metadata(
account, metadata, metadata_prefix, acceptable_statuses)
self.assertEqual(1, client.set_metadata_called)
def test_container_exists(self):
class Response(object):
def __init__(self, status_int):
self.status_int = status_int
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path, resp):
self.test = test
self.path = path
self.make_request_called = 0
self.resp = resp
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.make_request_called += 1
self.test.assertEqual('HEAD', method)
self.test.assertEqual(self.path, path)
self.test.assertEqual({}, headers)
self.test.assertEqual((2, 404), acceptable_statuses)
self.test.assertIsNone(body_file)
return self.resp
account, container, obj = path_parts()
path = make_path(account, container)
client = InternalClient(self, path, Response(200))
self.assertEqual(True, client.container_exists(account, container))
self.assertEqual(1, client.make_request_called)
client = InternalClient(self, path, Response(404))
self.assertEqual(False, client.container_exists(account, container))
self.assertEqual(1, client.make_request_called)
def test_create_container(self):
account, container, obj = path_parts()
path = make_path_info(account, container)
client, app = get_client_app()
app.register('PUT', path, swob.HTTPCreated, {})
client.create_container(account, container)
self.assertEqual([('PUT', path, {
'X-Backend-Allow-Reserved-Names': 'true',
'Host': 'localhost:80',
'User-Agent': 'test'
})], app._calls)
self.assertEqual(app.backend_user_agent, 'test')
self.assertEqual({}, app.unread_requests)
self.assertEqual({}, app.unclosed_requests)
def test_create_container_plumbing(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path, headers):
self.test = test
self.path = path
self.headers = headers
self.make_request_called = 0
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.make_request_called += 1
self.test.assertEqual('PUT', method)
self.test.assertEqual(self.path, path)
self.test.assertEqual(self.headers, headers)
self.test.assertEqual((2,), acceptable_statuses)
self.test.assertIsNone(body_file)
account, container, obj = path_parts()
path = make_path(account, container)
headers = 'some_headers'
client = InternalClient(self, path, headers)
client.create_container(account, container, headers)
self.assertEqual(1, client.make_request_called)
def test_delete_container(self):
account, container, obj = path_parts()
path = make_path_info(account, container)
client, app = get_client_app()
app.register('DELETE', path, swob.HTTPNoContent, {})
client.delete_container(account, container)
self.assertEqual(1, len(app._calls))
self.assertEqual({}, app.unread_requests)
self.assertEqual({}, app.unclosed_requests)
self.assertEqual(app.backend_user_agent, 'test')
def test_delete_container_plumbing(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path):
self.test = test
self.path = path
self.make_request_called = 0
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None):
self.make_request_called += 1
self.test.assertEqual('DELETE', method)
self.test.assertEqual(self.path, path)
self.test.assertEqual({}, headers)
self.test.assertEqual((2, 404), acceptable_statuses)
self.test.assertIsNone(body_file)
account, container, obj = path_parts()
path = make_path(account, container)
client = InternalClient(self, path)
client.delete_container(account, container)
self.assertEqual(1, client.make_request_called)
def test_get_container_metadata(self):
account, container, obj = path_parts()
path = make_path(account, container)
metadata_prefix = 'some_metadata_prefix'
acceptable_statuses = 'some_status_list'
client = GetMetadataInternalClient(
self, path, metadata_prefix, acceptable_statuses)
metadata = client.get_container_metadata(
account, container, metadata_prefix, acceptable_statuses)
self.assertEqual(client.metadata, metadata)
self.assertEqual(1, client.get_metadata_called)
def test_iter_objects(self):
account, container, obj = path_parts()
path = make_path(account, container)
        marker = 'some_marker'
end_marker = 'some_end_marker'
prefix = 'some_prefix'
acceptable_statuses = 'some_status_list'
items = '0 1 2'.split()
client = IterInternalClient(
self, path, marker, end_marker, prefix, acceptable_statuses, items)
ret_items = []
for obj in client.iter_objects(
account, container, marker, end_marker, prefix,
acceptable_statuses):
ret_items.append(obj)
self.assertEqual(items, ret_items)
def test_set_container_metadata(self):
account, container, obj = path_parts()
path = make_path_info(account, container)
client, app = get_client_app()
app.register('POST', path, swob.HTTPAccepted, {})
client.set_container_metadata(account, container, {'Color': 'Blue'},
metadata_prefix='X-Container-Meta-')
self.assertEqual([('POST', path, {
'X-Backend-Allow-Reserved-Names': 'true',
'Host': 'localhost:80',
'X-Container-Meta-Color': 'Blue',
'User-Agent': 'test'
})], app._calls)
self.assertEqual({}, app.unread_requests)
self.assertEqual({}, app.unclosed_requests)
self.assertEqual(app.backend_user_agent, 'test')
def test_set_container_metadata_plumbing(self):
account, container, obj = path_parts()
path = make_path(account, container)
metadata = 'some_metadata'
metadata_prefix = 'some_metadata_prefix'
acceptable_statuses = 'some_status_list'
client = SetMetadataInternalClient(
self, path, metadata, metadata_prefix, acceptable_statuses)
client.set_container_metadata(
account, container, metadata, metadata_prefix, acceptable_statuses)
self.assertEqual(1, client.set_metadata_called)
def test_delete_object(self):
account, container, obj = path_parts()
path = make_path_info(account, container, obj)
client, app = get_client_app()
app.register('DELETE', path, swob.HTTPNoContent, {})
client.delete_object(account, container, obj)
self.assertEqual(app.unclosed_requests, {})
self.assertEqual(1, len(app._calls))
self.assertEqual({}, app.unread_requests)
self.assertEqual({}, app.unclosed_requests)
self.assertEqual(app.backend_user_agent, 'test')
app.register('DELETE', path, swob.HTTPNotFound, {})
client.delete_object(account, container, obj)
self.assertEqual(app.unclosed_requests, {})
self.assertEqual(2, len(app._calls))
self.assertEqual({}, app.unread_requests)
self.assertEqual({}, app.unclosed_requests)
def test_get_object_metadata(self):
account, container, obj = path_parts()
path = make_path(account, container, obj)
metadata_prefix = 'some_metadata_prefix'
acceptable_statuses = 'some_status_list'
client = GetMetadataInternalClient(
self, path, metadata_prefix, acceptable_statuses)
metadata = client.get_object_metadata(
account, container, obj, metadata_prefix,
acceptable_statuses)
self.assertEqual(client.metadata, metadata)
self.assertEqual(1, client.get_metadata_called)
def test_get_metadata_extra_headers(self):
class FakeApp(FakeSwift):
def __call__(self, env, start_response):
self.req_env = env
start_response('200 Ok', [('Content-Length', '0')])
return []
client = internal_client.InternalClient(
None, 'some_agent', 3, use_replication_network=False,
app=FakeApp())
headers = {'X-Foo': 'bar'}
client.get_object_metadata('account', 'container', 'obj',
headers=headers)
self.assertEqual(client.app.req_env['HTTP_X_FOO'], 'bar')
def test_get_object(self):
account, container, obj = path_parts()
path_info = make_path_info(account, container, obj)
client, app = get_client_app()
headers = {'foo': 'bar'}
body = b'some_object_body'
params = {'symlink': 'get'}
app.register('GET', path_info, swob.HTTPOk, headers, body)
req_headers = {'x-important-header': 'some_important_value'}
status_int, resp_headers, obj_iter = client.get_object(
account, container, obj, req_headers, params=params)
self.assertEqual(status_int // 100, 2)
for k, v in headers.items():
self.assertEqual(v, resp_headers[k])
self.assertEqual(b''.join(obj_iter), body)
self.assertEqual(resp_headers['content-length'], str(len(body)))
self.assertEqual(app.call_count, 1)
req_headers.update({
'host': 'localhost:80', # from swob.Request.blank
'x-backend-allow-reserved-names': 'true', # also from IC
'x-backend-storage-policy-index': '2', # from proxy-server app
'user-agent': 'test',
})
self.assertEqual(app.calls_with_headers, [(
'GET', path_info + '?symlink=get', HeaderKeyDict(req_headers))])
def test_iter_object_lines(self):
class FakeApp(FakeSwift):
def __init__(self, lines):
super(FakeApp, self).__init__()
self.lines = lines
def __call__(self, env, start_response):
start_response('200 Ok', [('Content-Length', '0')])
return [b'%s\n' % x for x in self.lines]
lines = b'line1 line2 line3'.split()
client = internal_client.InternalClient(
None, 'some_agent', 3, use_replication_network=False,
app=FakeApp(lines))
ret_lines = []
for line in client.iter_object_lines('account', 'container', 'object'):
ret_lines.append(line)
self.assertEqual(lines, ret_lines)
self.assertEqual(client.app._pipeline_final_app.backend_user_agent,
'some_agent')
def test_iter_object_lines_compressed_object(self):
class FakeApp(FakeSwift):
def __init__(self, lines):
super(FakeApp, self).__init__()
self.lines = lines
def __call__(self, env, start_response):
start_response('200 Ok', [('Content-Length', '0')])
return internal_client.CompressingFileReader(
BytesIO(b'\n'.join(self.lines)))
lines = b'line1 line2 line3'.split()
client = internal_client.InternalClient(
None, 'some_agent', 3, use_replication_network=False,
app=FakeApp(lines))
ret_lines = []
for line in client.iter_object_lines(
'account', 'container', 'object.gz'):
ret_lines.append(line)
self.assertEqual(lines, ret_lines)
def test_iter_object_lines_404(self):
class FakeApp(FakeSwift):
def __call__(self, env, start_response):
start_response('404 Not Found', [])
return [b'one\ntwo\nthree']
client = internal_client.InternalClient(
None, 'some_agent', 3, use_replication_network=False,
app=FakeApp())
lines = []
for line in client.iter_object_lines(
'some_account', 'some_container', 'some_object',
acceptable_statuses=(2, 404)):
lines.append(line)
self.assertEqual([], lines)
def test_set_object_metadata(self):
account, container, obj = path_parts()
path = make_path_info(account, container, obj)
client, app = get_client_app()
app.register('POST', path, swob.HTTPAccepted, {})
client.set_object_metadata(account, container, obj, {'Color': 'Blue'},
metadata_prefix='X-Object-Meta-')
self.assertEqual([('POST', path, {
'X-Backend-Allow-Reserved-Names': 'true',
'Host': 'localhost:80',
'X-Object-Meta-Color': 'Blue',
'User-Agent': 'test'
})], app._calls)
self.assertEqual({}, app.unread_requests)
self.assertEqual({}, app.unclosed_requests)
self.assertEqual(app.backend_user_agent, 'test')
def test_set_object_metadata_plumbing(self):
account, container, obj = path_parts()
path = make_path(account, container, obj)
metadata = 'some_metadata'
metadata_prefix = 'some_metadata_prefix'
acceptable_statuses = 'some_status_list'
client = SetMetadataInternalClient(
self, path, metadata, metadata_prefix, acceptable_statuses)
client.set_object_metadata(
account, container, obj, metadata, metadata_prefix,
acceptable_statuses)
self.assertEqual(1, client.set_metadata_called)
def test_upload_object(self):
account, container, obj = path_parts()
path = make_path_info(account, container, obj)
client, app = get_client_app()
app.register('PUT', path, swob.HTTPCreated, {})
client.upload_object(BytesIO(b'fobj'), account, container, obj)
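        # No Content-Length was supplied, so upload_object is expected to fall
        # back to a chunked transfer (compare test_upload_object_not_chunked).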
self.assertEqual([('PUT', path, {
'Transfer-Encoding': 'chunked',
'X-Backend-Allow-Reserved-Names': 'true',
'Host': 'localhost:80',
'User-Agent': 'test'
})], app._calls)
self.assertEqual({}, app.unread_requests)
self.assertEqual({}, app.unclosed_requests)
self.assertEqual(app.backend_user_agent, 'test')
def test_upload_object_plumbing(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path, headers, fobj):
self.test = test
self.use_replication_network = False
self.path = path
self.headers = headers
self.fobj = fobj
self.make_request_called = 0
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None, params=None):
self.make_request_called += 1
self.test.assertEqual(self.path, path)
exp_headers = dict(self.headers)
exp_headers['Transfer-Encoding'] = 'chunked'
self.test.assertEqual(exp_headers, headers)
self.test.assertEqual(self.fobj, fobj)
fobj = 'some_fobj'
account, container, obj = path_parts()
path = make_path(account, container, obj)
headers = {'key': 'value'}
client = InternalClient(self, path, headers, fobj)
client.upload_object(fobj, account, container, obj, headers)
self.assertEqual(1, client.make_request_called)
def test_upload_object_not_chunked(self):
class InternalClient(internal_client.InternalClient):
def __init__(self, test, path, headers, fobj):
self.test = test
self.path = path
self.headers = headers
self.fobj = fobj
self.make_request_called = 0
def make_request(
self, method, path, headers, acceptable_statuses,
body_file=None, params=None):
self.make_request_called += 1
self.test.assertEqual(self.path, path)
exp_headers = dict(self.headers)
self.test.assertEqual(exp_headers, headers)
self.test.assertEqual(self.fobj, fobj)
fobj = 'some_fobj'
account, container, obj = path_parts()
path = make_path(account, container, obj)
headers = {'key': 'value', 'Content-Length': len(fobj)}
client = InternalClient(self, path, headers, fobj)
client.upload_object(fobj, account, container, obj, headers)
self.assertEqual(1, client.make_request_called)
class TestGetAuth(unittest.TestCase):
@mock.patch.object(urllib2, 'urlopen')
@mock.patch.object(urllib2, 'Request')
def test_ok(self, request, urlopen):
def getheader(name):
d = {'X-Storage-Url': 'url', 'X-Auth-Token': 'token'}
return d.get(name)
urlopen.return_value.info.return_value.getheader = getheader
url, token = internal_client.get_auth(
'http://127.0.0.1', 'user', 'key')
self.assertEqual(url, "url")
self.assertEqual(token, "token")
request.assert_called_with('http://127.0.0.1')
request.return_value.add_header.assert_any_call('X-Auth-User', 'user')
request.return_value.add_header.assert_any_call('X-Auth-Key', 'key')
def test_invalid_version(self):
self.assertRaises(SystemExit, internal_client.get_auth,
'http://127.0.0.1', 'user', 'key', auth_version=2.0)
class TestSimpleClient(unittest.TestCase):
def _test_get_head(self, request, urlopen, method):
mock_time_value = [1401224049.98]
def mock_time():
# global mock_time_value
mock_time_value[0] += 1
return mock_time_value[0]
with mock.patch('swift.common.internal_client.time', mock_time):
# basic request, only url as kwarg
request.return_value.get_type.return_value = "http"
urlopen.return_value.read.return_value = b''
urlopen.return_value.getcode.return_value = 200
urlopen.return_value.info.return_value = {'content-length': '345'}
sc = internal_client.SimpleClient(url='http://127.0.0.1')
logger = debug_logger('test-ic')
retval = sc.retry_request(
method, headers={'content-length': '123'}, logger=logger)
self.assertEqual(urlopen.call_count, 1)
request.assert_called_with('http://127.0.0.1?format=json',
headers={'content-length': '123'},
data=None)
self.assertEqual([{'content-length': '345'}, None], retval)
self.assertEqual(method, request.return_value.get_method())
self.assertEqual(logger.get_lines_for_level('debug'), [
'-> 2014-05-27T20:54:11 ' + method +
' http://127.0.0.1%3Fformat%3Djson 200 '
'123 345 1401224050.98 1401224051.98 1.0 -'
])
# Check if JSON is decoded
urlopen.return_value.read.return_value = b'{}'
retval = sc.retry_request(method)
self.assertEqual([{'content-length': '345'}, {}], retval)
# same as above, now with token
sc = internal_client.SimpleClient(url='http://127.0.0.1',
token='token')
retval = sc.retry_request(method)
request.assert_called_with('http://127.0.0.1?format=json',
headers={'X-Auth-Token': 'token'},
data=None)
self.assertEqual([{'content-length': '345'}, {}], retval)
# same as above, now with prefix
sc = internal_client.SimpleClient(url='http://127.0.0.1',
token='token')
retval = sc.retry_request(method, prefix="pre_")
request.assert_called_with(
'http://127.0.0.1?format=json&prefix=pre_',
headers={'X-Auth-Token': 'token'}, data=None)
self.assertEqual([{'content-length': '345'}, {}], retval)
# same as above, now with container name
retval = sc.retry_request(method, container='cont')
request.assert_called_with('http://127.0.0.1/cont?format=json',
headers={'X-Auth-Token': 'token'},
data=None)
self.assertEqual([{'content-length': '345'}, {}], retval)
# same as above, now with object name
retval = sc.retry_request(method, container='cont', name='obj')
request.assert_called_with('http://127.0.0.1/cont/obj',
headers={'X-Auth-Token': 'token'},
data=None)
self.assertEqual([{'content-length': '345'}, {}], retval)
@mock.patch.object(urllib2, 'urlopen')
@mock.patch.object(urllib2, 'Request')
def test_get(self, request, urlopen):
self._test_get_head(request, urlopen, 'GET')
@mock.patch.object(urllib2, 'urlopen')
@mock.patch.object(urllib2, 'Request')
def test_head(self, request, urlopen):
self._test_get_head(request, urlopen, 'HEAD')
@mock.patch.object(urllib2, 'urlopen')
@mock.patch.object(urllib2, 'Request')
def test_get_with_retries_all_failed(self, request, urlopen):
# Simulate a failing request, ensure retries done
request.return_value.get_type.return_value = "http"
urlopen.side_effect = urllib2.URLError('')
sc = internal_client.SimpleClient(url='http://127.0.0.1', retries=1)
with mock.patch('swift.common.internal_client.sleep') as mock_sleep:
self.assertRaises(urllib2.URLError, sc.retry_request, 'GET')
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(request.call_count, 2)
self.assertEqual(urlopen.call_count, 2)
@mock.patch.object(urllib2, 'urlopen')
@mock.patch.object(urllib2, 'Request')
def test_get_with_retries(self, request, urlopen):
# First request fails, retry successful
request.return_value.get_type.return_value = "http"
mock_resp = mock.MagicMock()
mock_resp.read.return_value = b''
mock_resp.info.return_value = {}
urlopen.side_effect = [urllib2.URLError(''), mock_resp]
sc = internal_client.SimpleClient(url='http://127.0.0.1', retries=1,
token='token')
with mock.patch('swift.common.internal_client.sleep') as mock_sleep:
retval = sc.retry_request('GET')
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(request.call_count, 2)
self.assertEqual(urlopen.call_count, 2)
request.assert_called_with('http://127.0.0.1?format=json', data=None,
headers={'X-Auth-Token': 'token'})
self.assertEqual([{}, None], retval)
self.assertEqual(sc.attempts, 2)
@mock.patch.object(urllib2, 'urlopen')
def test_get_with_retries_param(self, mock_urlopen):
mock_response = mock.MagicMock()
mock_response.read.return_value = b''
mock_response.info.return_value = {}
mock_urlopen.side_effect = internal_client.httplib.BadStatusLine('')
c = internal_client.SimpleClient(url='http://127.0.0.1', token='token')
self.assertEqual(c.retries, 5)
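        # the default of 5 retries means up to 6 attempts in total; a
        # per-request retries argument overrides the client-wide default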
# first without retries param
with mock.patch('swift.common.internal_client.sleep') as mock_sleep:
self.assertRaises(internal_client.httplib.BadStatusLine,
c.retry_request, 'GET')
self.assertEqual(mock_sleep.call_count, 5)
self.assertEqual(mock_urlopen.call_count, 6)
# then with retries param
mock_urlopen.reset_mock()
with mock.patch('swift.common.internal_client.sleep') as mock_sleep:
self.assertRaises(internal_client.httplib.BadStatusLine,
c.retry_request, 'GET', retries=2)
self.assertEqual(mock_sleep.call_count, 2)
self.assertEqual(mock_urlopen.call_count, 3)
# and this time with a real response
mock_urlopen.reset_mock()
mock_urlopen.side_effect = [internal_client.httplib.BadStatusLine(''),
mock_response]
with mock.patch('swift.common.internal_client.sleep') as mock_sleep:
retval = c.retry_request('GET', retries=1)
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(mock_urlopen.call_count, 2)
self.assertEqual([{}, None], retval)
@mock.patch.object(urllib2, 'urlopen')
def test_request_with_retries_with_HTTPError(self, mock_urlopen):
mock_response = mock.MagicMock()
mock_response.read.return_value = b''
c = internal_client.SimpleClient(url='http://127.0.0.1', token='token')
self.assertEqual(c.retries, 5)
for request_method in 'GET PUT POST DELETE HEAD COPY'.split():
mock_urlopen.reset_mock()
mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5)
with mock.patch('swift.common.internal_client.sleep') \
as mock_sleep:
self.assertRaises(exceptions.ClientException,
c.retry_request, request_method, retries=1)
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(mock_urlopen.call_count, 2)
@mock.patch.object(urllib2, 'urlopen')
def test_request_container_with_retries_with_HTTPError(self,
mock_urlopen):
mock_response = mock.MagicMock()
mock_response.read.return_value = b''
c = internal_client.SimpleClient(url='http://127.0.0.1', token='token')
self.assertEqual(c.retries, 5)
for request_method in 'GET PUT POST DELETE HEAD COPY'.split():
mock_urlopen.reset_mock()
mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5)
with mock.patch('swift.common.internal_client.sleep') \
as mock_sleep:
self.assertRaises(exceptions.ClientException,
c.retry_request, request_method,
container='con', retries=1)
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(mock_urlopen.call_count, 2)
@mock.patch.object(urllib2, 'urlopen')
def test_request_object_with_retries_with_HTTPError(self,
mock_urlopen):
mock_response = mock.MagicMock()
mock_response.read.return_value = b''
c = internal_client.SimpleClient(url='http://127.0.0.1', token='token')
self.assertEqual(c.retries, 5)
for request_method in 'GET PUT POST DELETE HEAD COPY'.split():
mock_urlopen.reset_mock()
mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5)
with mock.patch('swift.common.internal_client.sleep') \
as mock_sleep:
self.assertRaises(exceptions.ClientException,
c.retry_request, request_method,
container='con', name='obj', retries=1)
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(mock_urlopen.call_count, 2)
@mock.patch.object(urllib2, 'urlopen')
def test_delete_object_with_404_no_retry(self, mock_urlopen):
mock_response = mock.MagicMock()
mock_response.read.return_value = b''
err_args = [None, 404, None, None, None]
mock_urlopen.side_effect = urllib2.HTTPError(*err_args)
with mock.patch('swift.common.internal_client.sleep') as mock_sleep, \
self.assertRaises(exceptions.ClientException) as caught:
internal_client.delete_object('http://127.0.0.1',
container='con', name='obj')
self.assertEqual(caught.exception.http_status, 404)
self.assertEqual(mock_sleep.call_count, 0)
self.assertEqual(mock_urlopen.call_count, 1)
@mock.patch.object(urllib2, 'urlopen')
def test_delete_object_with_409_no_retry(self, mock_urlopen):
mock_response = mock.MagicMock()
mock_response.read.return_value = b''
err_args = [None, 409, None, None, None]
mock_urlopen.side_effect = urllib2.HTTPError(*err_args)
with mock.patch('swift.common.internal_client.sleep') as mock_sleep, \
self.assertRaises(exceptions.ClientException) as caught:
internal_client.delete_object('http://127.0.0.1',
container='con', name='obj')
self.assertEqual(caught.exception.http_status, 409)
self.assertEqual(mock_sleep.call_count, 0)
self.assertEqual(mock_urlopen.call_count, 1)
def test_proxy(self):
# check that proxy arg is passed through to the urllib Request
scheme = 'http'
proxy_host = '127.0.0.1:80'
proxy = '%s://%s' % (scheme, proxy_host)
url = 'https://127.0.0.1:1/a'
mocked = 'swift.common.internal_client.urllib2.urlopen'
# module level methods
for func in (internal_client.put_object,
internal_client.delete_object):
with mock.patch(mocked) as mock_urlopen:
mock_urlopen.return_value = FakeConn()
func(url, container='c', name='o1', contents='', proxy=proxy,
timeout=0.1, retries=0)
self.assertEqual(1, mock_urlopen.call_count)
args, kwargs = mock_urlopen.call_args
self.assertEqual(1, len(args))
self.assertEqual(1, len(kwargs))
self.assertEqual(0.1, kwargs['timeout'])
self.assertTrue(isinstance(args[0], urllib2.Request))
self.assertEqual(proxy_host, args[0].host)
if six.PY2:
self.assertEqual(scheme, args[0].type)
else:
# TODO: figure out why this happens, whether py2 or py3 is
# messed up, whether we care, and what can be done about it
self.assertEqual('https', args[0].type)
# class methods
content = mock.MagicMock()
cl = internal_client.SimpleClient(url)
scenarios = ((cl.get_account, []),
(cl.get_container, ['c']),
(cl.put_container, ['c']),
(cl.put_object, ['c', 'o', content]))
for scenario in scenarios:
with mock.patch(mocked) as mock_urlopen:
mock_urlopen.return_value = FakeConn()
scenario[0](*scenario[1], proxy=proxy, timeout=0.1)
self.assertEqual(1, mock_urlopen.call_count)
args, kwargs = mock_urlopen.call_args
self.assertEqual(1, len(args))
self.assertEqual(1, len(kwargs))
self.assertEqual(0.1, kwargs['timeout'])
self.assertTrue(isinstance(args[0], urllib2.Request))
self.assertEqual(proxy_host, args[0].host)
if six.PY2:
self.assertEqual(scheme, args[0].type)
else:
# See above
self.assertEqual('https', args[0].type)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/test_internal_client.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.request_helpers"""
import unittest
from swift.common.swob import Request, HTTPException, HeaderKeyDict
from swift.common.storage_policy import POLICIES, EC_POLICY, REPL_POLICY
from swift.common import request_helpers as rh
from swift.common.constraints import AUTO_CREATE_ACCOUNT_PREFIX
from test.unit import patch_policies
from test.unit.common.test_utils import FakeResponse
server_types = ['account', 'container', 'object']
class TestRequestHelpers(unittest.TestCase):
def test_constrain_req_limit(self):
req = Request.blank('')
self.assertEqual(10, rh.constrain_req_limit(req, 10))
req = Request.blank('', query_string='limit=1')
self.assertEqual(1, rh.constrain_req_limit(req, 10))
req = Request.blank('', query_string='limit=1.0')
self.assertEqual(10, rh.constrain_req_limit(req, 10))
req = Request.blank('', query_string='limit=11')
with self.assertRaises(HTTPException) as raised:
rh.constrain_req_limit(req, 10)
self.assertEqual(raised.exception.status_int, 412)
def test_validate_params(self):
req = Request.blank('')
actual = rh.validate_params(req, ('limit', 'marker', 'end_marker'))
self.assertEqual({}, actual)
req = Request.blank('', query_string='limit=1&junk=here&marker=foo')
actual = rh.validate_params(req, ())
self.assertEqual({}, actual)
req = Request.blank('', query_string='limit=1&junk=here&marker=foo')
actual = rh.validate_params(req, ('limit', 'marker', 'end_marker'))
expected = {'limit': '1', 'marker': 'foo'}
self.assertEqual(expected, actual)
req = Request.blank('', query_string='limit=1&junk=here&marker=')
actual = rh.validate_params(req, ('limit', 'marker', 'end_marker'))
expected = {'limit': '1', 'marker': ''}
self.assertEqual(expected, actual)
# ignore bad junk
req = Request.blank('', query_string='limit=1&junk=%ff&marker=foo')
actual = rh.validate_params(req, ('limit', 'marker', 'end_marker'))
expected = {'limit': '1', 'marker': 'foo'}
self.assertEqual(expected, actual)
# error on bad wanted parameter
req = Request.blank('', query_string='limit=1&junk=here&marker=%ff')
with self.assertRaises(HTTPException) as raised:
rh.validate_params(req, ('limit', 'marker', 'end_marker'))
self.assertEqual(raised.exception.status_int, 400)
def test_validate_container_params(self):
req = Request.blank('')
actual = rh.validate_container_params(req)
self.assertEqual({'limit': 10000}, actual)
req = Request.blank('', query_string='limit=1&junk=here&marker=foo')
actual = rh.validate_container_params(req)
expected = {'limit': 1, 'marker': 'foo'}
self.assertEqual(expected, actual)
req = Request.blank('', query_string='limit=1&junk=here&marker=')
actual = rh.validate_container_params(req)
expected = {'limit': 1, 'marker': ''}
self.assertEqual(expected, actual)
# ignore bad junk
req = Request.blank('', query_string='limit=1&junk=%ff&marker=foo')
actual = rh.validate_container_params(req)
expected = {'limit': 1, 'marker': 'foo'}
self.assertEqual(expected, actual)
# error on bad wanted parameter
req = Request.blank('', query_string='limit=1&junk=here&marker=%ff')
with self.assertRaises(HTTPException) as raised:
rh.validate_container_params(req)
self.assertEqual(raised.exception.status_int, 400)
# error on bad limit
req = Request.blank('', query_string='limit=10001')
with self.assertRaises(HTTPException) as raised:
rh.validate_container_params(req)
self.assertEqual(raised.exception.status_int, 412)
def test_is_user_meta(self):
m_type = 'meta'
for st in server_types:
self.assertTrue(rh.is_user_meta(st, 'x-%s-%s-foo' % (st, m_type)))
self.assertFalse(rh.is_user_meta(st, 'x-%s-%s-' % (st, m_type)))
self.assertFalse(rh.is_user_meta(st, 'x-%s-%sfoo' % (st, m_type)))
def test_is_sys_meta(self):
m_type = 'sysmeta'
for st in server_types:
self.assertTrue(rh.is_sys_meta(st, 'x-%s-%s-foo' % (st, m_type)))
self.assertFalse(rh.is_sys_meta(st, 'x-%s-%s-' % (st, m_type)))
self.assertFalse(rh.is_sys_meta(st, 'x-%s-%sfoo' % (st, m_type)))
def test_is_sys_or_user_meta(self):
m_types = ['sysmeta', 'meta']
for mt in m_types:
for st in server_types:
self.assertTrue(rh.is_sys_or_user_meta(
st, 'x-%s-%s-foo' % (st, mt)))
self.assertFalse(rh.is_sys_or_user_meta(
st, 'x-%s-%s-' % (st, mt)))
self.assertFalse(rh.is_sys_or_user_meta(
st, 'x-%s-%sfoo' % (st, mt)))
def test_strip_sys_meta_prefix(self):
mt = 'sysmeta'
for st in server_types:
self.assertEqual(rh.strip_sys_meta_prefix(
st, 'x-%s-%s-a' % (st, mt)), 'a')
mt = 'not-sysmeta'
for st in server_types:
with self.assertRaises(ValueError):
rh.strip_sys_meta_prefix(st, 'x-%s-%s-a' % (st, mt))
def test_strip_user_meta_prefix(self):
mt = 'meta'
for st in server_types:
self.assertEqual(rh.strip_user_meta_prefix(
st, 'x-%s-%s-a' % (st, mt)), 'a')
mt = 'not-meta'
for st in server_types:
with self.assertRaises(ValueError):
rh.strip_sys_meta_prefix(st, 'x-%s-%s-a' % (st, mt))
def test_is_object_transient_sysmeta(self):
self.assertTrue(rh.is_object_transient_sysmeta(
'x-object-transient-sysmeta-foo'))
self.assertFalse(rh.is_object_transient_sysmeta(
'x-object-transient-sysmeta-'))
self.assertFalse(rh.is_object_transient_sysmeta(
'x-object-meatmeta-foo'))
def test_strip_object_transient_sysmeta_prefix(self):
mt = 'object-transient-sysmeta'
self.assertEqual(rh.strip_object_transient_sysmeta_prefix(
'x-%s-a' % mt), 'a')
mt = 'object-sysmeta-transient'
with self.assertRaises(ValueError):
rh.strip_object_transient_sysmeta_prefix('x-%s-a' % mt)
def test_remove_items(self):
src = {'a': 'b',
'c': 'd'}
test = lambda x: x == 'a'
rem = rh.remove_items(src, test)
self.assertEqual(src, {'c': 'd'})
self.assertEqual(rem, {'a': 'b'})
def test_copy_header_subset(self):
src = {'a': 'b',
'c': 'd'}
from_req = Request.blank('/path', environ={}, headers=src)
to_req = Request.blank('/path', {})
test = lambda x: x.lower() == 'a'
rh.copy_header_subset(from_req, to_req, test)
self.assertTrue('A' in to_req.headers)
self.assertEqual(to_req.headers['A'], 'b')
self.assertFalse('c' in to_req.headers)
self.assertFalse('C' in to_req.headers)
def test_is_use_replication_network(self):
self.assertFalse(rh.is_use_replication_network())
self.assertFalse(rh.is_use_replication_network({}))
self.assertFalse(rh.is_use_replication_network(
{'x-backend-use-replication-network': 'false'}))
self.assertFalse(rh.is_use_replication_network(
{'x-backend-use-replication-network': 'no'}))
self.assertTrue(rh.is_use_replication_network(
{'x-backend-use-replication-network': 'true'}))
self.assertTrue(rh.is_use_replication_network(
{'x-backend-use-replication-network': 'yes'}))
self.assertTrue(rh.is_use_replication_network(
{'X-Backend-Use-Replication-Network': 'True'}))
def test_get_ip_port(self):
node = {
'ip': '1.2.3.4',
'port': 6000,
'replication_ip': '5.6.7.8',
'replication_port': 7000,
}
self.assertEqual(('1.2.3.4', 6000), rh.get_ip_port(node, {}))
self.assertEqual(('5.6.7.8', 7000), rh.get_ip_port(node, {
rh.USE_REPLICATION_NETWORK_HEADER: 'true'}))
self.assertEqual(('1.2.3.4', 6000), rh.get_ip_port(node, {
rh.USE_REPLICATION_NETWORK_HEADER: 'false'}))
# node trumps absent header and False header
node['use_replication'] = True
self.assertEqual(('5.6.7.8', 7000), rh.get_ip_port(node, {}))
self.assertEqual(('5.6.7.8', 7000), rh.get_ip_port(node, {
rh.USE_REPLICATION_NETWORK_HEADER: 'false'}))
# True header trumps node
node['use_replication'] = False
self.assertEqual(('5.6.7.8', 7000), rh.get_ip_port(node, {
rh.USE_REPLICATION_NETWORK_HEADER: 'true'}))
@patch_policies(with_ec_default=True)
def test_get_name_and_placement_object_req(self):
path = '/device/part/account/container/object'
req = Request.blank(path, headers={
'X-Backend-Storage-Policy-Index': '0'})
device, part, account, container, obj, policy = \
rh.get_name_and_placement(req, 5, 5, True)
self.assertEqual(device, 'device')
self.assertEqual(part, 'part')
self.assertEqual(account, 'account')
self.assertEqual(container, 'container')
self.assertEqual(obj, 'object')
self.assertEqual(policy, POLICIES[0])
self.assertEqual(policy.policy_type, EC_POLICY)
req.headers['X-Backend-Storage-Policy-Index'] = 1
device, part, account, container, obj, policy = \
rh.get_name_and_placement(req, 5, 5, True)
self.assertEqual(device, 'device')
self.assertEqual(part, 'part')
self.assertEqual(account, 'account')
self.assertEqual(container, 'container')
self.assertEqual(obj, 'object')
self.assertEqual(policy, POLICIES[1])
self.assertEqual(policy.policy_type, REPL_POLICY)
req.headers['X-Backend-Storage-Policy-Index'] = 'foo'
with self.assertRaises(HTTPException) as raised:
device, part, account, container, obj, policy = \
rh.get_name_and_placement(req, 5, 5, True)
e = raised.exception
self.assertEqual(e.status_int, 503)
self.assertEqual(str(e), '503 Service Unavailable')
self.assertEqual(e.body, b"No policy with index foo")
@patch_policies(with_ec_default=True)
def test_get_name_and_placement_object_replication(self):
# yup, suffixes are sent '-'.joined in the path
path = '/device/part/012-345-678-9ab-cde'
req = Request.blank(path, headers={
'X-Backend-Storage-Policy-Index': '0'})
device, partition, suffix_parts, policy = \
rh.get_name_and_placement(req, 2, 3, True)
self.assertEqual(device, 'device')
self.assertEqual(partition, 'part')
self.assertEqual(suffix_parts, '012-345-678-9ab-cde')
self.assertEqual(policy, POLICIES[0])
self.assertEqual(policy.policy_type, EC_POLICY)
path = '/device/part'
req = Request.blank(path, headers={
'X-Backend-Storage-Policy-Index': '1'})
device, partition, suffix_parts, policy = \
rh.get_name_and_placement(req, 2, 3, True)
self.assertEqual(device, 'device')
self.assertEqual(partition, 'part')
self.assertIsNone(suffix_parts) # false-y
self.assertEqual(policy, POLICIES[1])
self.assertEqual(policy.policy_type, REPL_POLICY)
path = '/device/part/' # with a trailing slash
req = Request.blank(path, headers={
'X-Backend-Storage-Policy-Index': '1'})
device, partition, suffix_parts, policy = \
rh.get_name_and_placement(req, 2, 3, True)
self.assertEqual(device, 'device')
self.assertEqual(partition, 'part')
self.assertEqual(suffix_parts, '') # still false-y
self.assertEqual(policy, POLICIES[1])
self.assertEqual(policy.policy_type, REPL_POLICY)
def test_validate_internal_name(self):
self.assertIsNone(rh._validate_internal_name('foo'))
self.assertIsNone(rh._validate_internal_name(
rh.get_reserved_name('foo')))
self.assertIsNone(rh._validate_internal_name(
rh.get_reserved_name('foo', 'bar')))
self.assertIsNone(rh._validate_internal_name(''))
self.assertIsNone(rh._validate_internal_name(rh.RESERVED))
def test_invalid_reserved_name(self):
with self.assertRaises(HTTPException) as raised:
rh._validate_internal_name('foo' + rh.RESERVED)
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace name")
def test_validate_internal_account(self):
self.assertIsNone(rh.validate_internal_account('AUTH_foo'))
self.assertIsNone(rh.validate_internal_account(
rh.get_reserved_name('AUTH_foo')))
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_account('AUTH_foo' + rh.RESERVED)
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace account")
def test_validate_internal_container(self):
self.assertIsNone(rh.validate_internal_container('AUTH_foo', 'bar'))
self.assertIsNone(rh.validate_internal_container(
rh.get_reserved_name('AUTH_foo'), 'bar'))
self.assertIsNone(rh.validate_internal_container(
'foo', rh.get_reserved_name('bar')))
self.assertIsNone(rh.validate_internal_container(
rh.get_reserved_name('AUTH_foo'), rh.get_reserved_name('bar')))
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_container('AUTH_foo' + rh.RESERVED, 'bar')
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace account")
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_container('AUTH_foo', 'bar' + rh.RESERVED)
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace container")
# These should always be operating on split_path outputs so this
# shouldn't really be an issue, but just in case...
for acct in ('', None):
with self.assertRaises(ValueError) as raised:
rh.validate_internal_container(
acct, 'bar')
self.assertEqual(raised.exception.args[0], 'Account is required')
def test_validate_internal_object(self):
self.assertIsNone(rh.validate_internal_obj('AUTH_foo', 'bar', 'baz'))
self.assertIsNone(rh.validate_internal_obj(
rh.get_reserved_name('AUTH_foo'), 'bar', 'baz'))
for acct in ('AUTH_foo', rh.get_reserved_name('AUTH_foo')):
self.assertIsNone(rh.validate_internal_obj(
acct,
rh.get_reserved_name('bar'),
rh.get_reserved_name('baz')))
for acct in ('AUTH_foo', rh.get_reserved_name('AUTH_foo')):
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_obj(
acct, 'bar', rh.get_reserved_name('baz'))
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace object "
b"in user-namespace container")
for acct in ('AUTH_foo', rh.get_reserved_name('AUTH_foo')):
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_obj(
acct, rh.get_reserved_name('bar'), 'baz')
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid user-namespace object "
b"in reserved-namespace container")
# These should always be operating on split_path outputs so this
# shouldn't really be an issue, but just in case...
for acct in ('', None):
with self.assertRaises(ValueError) as raised:
rh.validate_internal_obj(
acct, 'bar', 'baz')
self.assertEqual(raised.exception.args[0], 'Account is required')
for cont in ('', None):
with self.assertRaises(ValueError) as raised:
rh.validate_internal_obj(
'AUTH_foo', cont, 'baz')
self.assertEqual(raised.exception.args[0], 'Container is required')
def test_invalid_names_in_system_accounts(self):
self.assertIsNone(rh.validate_internal_obj(
AUTO_CREATE_ACCOUNT_PREFIX + 'system_account', 'foo',
'crazy%stown' % rh.RESERVED))
def test_invalid_reserved_names(self):
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_obj('AUTH_foo' + rh.RESERVED, 'bar', 'baz')
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace account")
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_obj('AUTH_foo', 'bar' + rh.RESERVED, 'baz')
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace container")
with self.assertRaises(HTTPException) as raised:
rh.validate_internal_obj('AUTH_foo', 'bar', 'baz' + rh.RESERVED)
e = raised.exception
self.assertEqual(e.status_int, 400)
self.assertEqual(str(e), '400 Bad Request')
self.assertEqual(e.body, b"Invalid reserved-namespace object")
def test_get_reserved_name(self):
expectations = {
tuple(): rh.RESERVED,
('',): rh.RESERVED,
('foo',): rh.RESERVED + 'foo',
('foo', 'bar'): rh.RESERVED + 'foo' + rh.RESERVED + 'bar',
('foo', ''): rh.RESERVED + 'foo' + rh.RESERVED,
('', ''): rh.RESERVED * 2,
}
failures = []
for parts, expected in expectations.items():
name = rh.get_reserved_name(*parts)
if name != expected:
failures.append('get given %r expected %r != %r' % (
parts, expected, name))
if failures:
            self.fail('Unexpected results:\n' + '\n'.join(failures))
def test_invalid_get_reserved_name(self):
self.assertRaises(ValueError)
with self.assertRaises(ValueError) as ctx:
rh.get_reserved_name('foo', rh.RESERVED + 'bar', 'baz')
self.assertEqual(str(ctx.exception),
'Invalid reserved part in components')
def test_split_reserved_name(self):
expectations = {
rh.RESERVED: ('',),
rh.RESERVED + 'foo': ('foo',),
rh.RESERVED + 'foo' + rh.RESERVED + 'bar': ('foo', 'bar'),
rh.RESERVED + 'foo' + rh.RESERVED: ('foo', ''),
rh.RESERVED * 2: ('', ''),
}
failures = []
for name, expected in expectations.items():
parts = rh.split_reserved_name(name)
if tuple(parts) != expected:
failures.append('split given %r expected %r != %r' % (
name, expected, parts))
if failures:
            self.fail('Unexpected results:\n' + '\n'.join(failures))
def test_invalid_split_reserved_name(self):
self.assertRaises(ValueError)
with self.assertRaises(ValueError) as ctx:
rh.split_reserved_name('foo')
self.assertEqual(str(ctx.exception),
'Invalid reserved name')
class TestHTTPResponseToDocumentIters(unittest.TestCase):
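    # http_response_to_document_iters yields one
    # (first_byte, last_byte, length, headers, body) tuple per document in
    # the response, whether that is a single body or a multipart/byteranges
    # payload.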
def test_200(self):
fr = FakeResponse(
200,
{'Content-Length': '10', 'Content-Type': 'application/lunch'},
b'sandwiches')
doc_iters = rh.http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 0)
self.assertEqual(last_byte, 9)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Length'), '10')
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), b'sandwiches')
self.assertRaises(StopIteration, next, doc_iters)
fr = FakeResponse(
200,
{'Transfer-Encoding': 'chunked',
'Content-Type': 'application/lunch'},
b'sandwiches')
doc_iters = rh.http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 0)
self.assertIsNone(last_byte)
self.assertIsNone(length)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Transfer-Encoding'), 'chunked')
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), b'sandwiches')
self.assertRaises(StopIteration, next, doc_iters)
def test_206_single_range(self):
fr = FakeResponse(
206,
{'Content-Length': '8', 'Content-Type': 'application/lunch',
'Content-Range': 'bytes 1-8/10'},
b'andwiche')
doc_iters = rh.http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 1)
self.assertEqual(last_byte, 8)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Length'), '8')
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), b'andwiche')
self.assertRaises(StopIteration, next, doc_iters)
        # A chunked response should be treated in the same way as a
        # non-chunked one
fr = FakeResponse(
206,
{'Transfer-Encoding': 'chunked',
'Content-Type': 'application/lunch',
'Content-Range': 'bytes 1-8/10'},
b'andwiche')
doc_iters = rh.http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 1)
self.assertEqual(last_byte, 8)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), b'andwiche')
self.assertRaises(StopIteration, next, doc_iters)
def test_206_multiple_ranges(self):
fr = FakeResponse(
206,
{'Content-Type': 'multipart/byteranges; boundary=asdfasdfasdf'},
(b"--asdfasdfasdf\r\n"
b"Content-Type: application/lunch\r\n"
b"Content-Range: bytes 0-3/10\r\n"
b"\r\n"
b"sand\r\n"
b"--asdfasdfasdf\r\n"
b"Content-Type: application/lunch\r\n"
b"Content-Range: bytes 6-9/10\r\n"
b"\r\n"
b"ches\r\n"
b"--asdfasdfasdf--"))
doc_iters = rh.http_response_to_document_iters(fr)
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 0)
self.assertEqual(last_byte, 3)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), b'sand')
first_byte, last_byte, length, headers, body = next(doc_iters)
self.assertEqual(first_byte, 6)
self.assertEqual(last_byte, 9)
self.assertEqual(length, 10)
header_dict = HeaderKeyDict(headers)
self.assertEqual(header_dict.get('Content-Type'), 'application/lunch')
self.assertEqual(body.read(), b'ches')
self.assertRaises(StopIteration, next, doc_iters)
def test_update_etag_is_at_header(self):
# start with no existing X-Backend-Etag-Is-At
req = Request.blank('/v/a/c/o')
rh.update_etag_is_at_header(req, 'X-Object-Sysmeta-My-Etag')
self.assertEqual('X-Object-Sysmeta-My-Etag',
req.headers['X-Backend-Etag-Is-At'])
# add another alternate
rh.update_etag_is_at_header(req, 'X-Object-Sysmeta-Ec-Etag')
self.assertEqual('X-Object-Sysmeta-My-Etag,X-Object-Sysmeta-Ec-Etag',
req.headers['X-Backend-Etag-Is-At'])
with self.assertRaises(ValueError) as cm:
rh.update_etag_is_at_header(req, 'X-Object-Sysmeta-,-Bad')
self.assertEqual('Header name must not contain commas',
cm.exception.args[0])
def test_resolve_etag_is_at_header(self):
def do_test():
req = Request.blank('/v/a/c/o')
# ok to have no X-Backend-Etag-Is-At
self.assertIsNone(rh.resolve_etag_is_at_header(req, metadata))
# ok to have no matching metadata
req.headers['X-Backend-Etag-Is-At'] = 'X-Not-There'
self.assertIsNone(rh.resolve_etag_is_at_header(req, metadata))
# selects from metadata
req.headers['X-Backend-Etag-Is-At'] = 'X-Object-Sysmeta-Ec-Etag'
self.assertEqual('an etag value',
rh.resolve_etag_is_at_header(req, metadata))
req.headers['X-Backend-Etag-Is-At'] = 'X-Object-Sysmeta-My-Etag'
self.assertEqual('another etag value',
rh.resolve_etag_is_at_header(req, metadata))
# first in list takes precedence
req.headers['X-Backend-Etag-Is-At'] = \
'X-Object-Sysmeta-My-Etag,X-Object-Sysmeta-Ec-Etag'
self.assertEqual('another etag value',
rh.resolve_etag_is_at_header(req, metadata))
# non-existent alternates are passed over
req.headers['X-Backend-Etag-Is-At'] = \
'X-Bogus,X-Object-Sysmeta-My-Etag,X-Object-Sysmeta-Ec-Etag'
self.assertEqual('another etag value',
rh.resolve_etag_is_at_header(req, metadata))
# spaces in list are ok
alts = 'X-Foo, X-Object-Sysmeta-My-Etag , X-Object-Sysmeta-Ec-Etag'
req.headers['X-Backend-Etag-Is-At'] = alts
self.assertEqual('another etag value',
rh.resolve_etag_is_at_header(req, metadata))
# lower case in list is ok
alts = alts.lower()
req.headers['X-Backend-Etag-Is-At'] = alts
self.assertEqual('another etag value',
rh.resolve_etag_is_at_header(req, metadata))
# upper case in list is ok
alts = alts.upper()
req.headers['X-Backend-Etag-Is-At'] = alts
self.assertEqual('another etag value',
rh.resolve_etag_is_at_header(req, metadata))
metadata = {'X-Object-Sysmeta-Ec-Etag': 'an etag value',
'X-Object-Sysmeta-My-Etag': 'another etag value'}
do_test()
metadata = dict((k.lower(), v) for k, v in metadata.items())
do_test()
metadata = dict((k.upper(), v) for k, v in metadata.items())
do_test()
| swift-master | test/unit/common/test_request_helpers.py |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import unittest
import uuid
import six
from mock import ANY, patch
from swift.common.container_sync_realms import ContainerSyncRealms
from test.debug_logger import debug_logger
from test.unit import temptree
class TestUtils(unittest.TestCase):
def test_no_file_there(self):
unique = uuid.uuid4().hex
logger = debug_logger()
csr = ContainerSyncRealms(unique, logger)
self.assertEqual(
logger.all_log_lines(),
{'debug': [
"Could not load '%s': [Errno 2] No such file or directory: "
"'%s'" % (unique, unique)]})
self.assertEqual(csr.mtime_check_interval, 300)
self.assertEqual(csr.realms(), [])
def test_os_error(self):
fname = 'container-sync-realms.conf'
fcontents = ''
with temptree([fname], [fcontents]) as tempdir:
logger = debug_logger()
fpath = os.path.join(tempdir, fname)
def _mock_getmtime(path):
raise OSError(errno.EACCES,
os.strerror(errno.EACCES) +
": '%s'" % (fpath))
with patch('os.path.getmtime', _mock_getmtime):
csr = ContainerSyncRealms(fpath, logger)
self.assertEqual(
logger.all_log_lines(),
{'error': [
"Could not load '%s': [Errno 13] Permission denied: "
"'%s'" % (fpath, fpath)]})
self.assertEqual(csr.mtime_check_interval, 300)
self.assertEqual(csr.realms(), [])
def test_empty(self):
fname = 'container-sync-realms.conf'
fcontents = ''
with temptree([fname], [fcontents]) as tempdir:
logger = debug_logger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
self.assertEqual(logger.all_log_lines(), {})
self.assertEqual(csr.mtime_check_interval, 300)
self.assertEqual(csr.realms(), [])
def test_error_parsing(self):
fname = 'container-sync-realms.conf'
fcontents = 'invalid'
with temptree([fname], [fcontents]) as tempdir:
logger = debug_logger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
if six.PY2:
fmt = "Could not load '%s': " \
"File contains no section headers.\n" \
"file: %s, line: 1\n" \
"'invalid'"
else:
fmt = "Could not load '%s': " \
"File contains no section headers.\n" \
"file: '%s', line: 1\n" \
"'invalid'"
self.assertEqual(
logger.all_log_lines(),
{'error': [fmt % (fpath, fpath)]})
self.assertEqual(csr.mtime_check_interval, 300)
self.assertEqual(csr.realms(), [])
def test_one_realm(self):
fname = 'container-sync-realms.conf'
fcontents = '''
[US]
key = 9ff3b71c849749dbaec4ccdd3cbab62b
cluster_dfw1 = http://dfw1.host/v1/
'''
with temptree([fname], [fcontents]) as tempdir:
logger = debug_logger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
self.assertEqual(logger.all_log_lines(), {})
self.assertEqual(csr.mtime_check_interval, 300)
self.assertEqual(csr.realms(), ['US'])
self.assertEqual(csr.key('US'), '9ff3b71c849749dbaec4ccdd3cbab62b')
self.assertIsNone(csr.key2('US'))
self.assertEqual(csr.clusters('US'), ['DFW1'])
self.assertEqual(
csr.endpoint('US', 'DFW1'), 'http://dfw1.host/v1/')
def test_two_realms_and_change_a_default(self):
fname = 'container-sync-realms.conf'
fcontents = '''
[DEFAULT]
mtime_check_interval = 60
[US]
key = 9ff3b71c849749dbaec4ccdd3cbab62b
cluster_dfw1 = http://dfw1.host/v1/
[UK]
key = e9569809dc8b4951accc1487aa788012
key2 = f6351bd1cc36413baa43f7ba1b45e51d
cluster_lon3 = http://lon3.host/v1/
'''
with temptree([fname], [fcontents]) as tempdir:
logger = debug_logger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
self.assertEqual(logger.all_log_lines(), {})
self.assertEqual(csr.mtime_check_interval, 60)
self.assertEqual(sorted(csr.realms()), ['UK', 'US'])
self.assertEqual(csr.key('US'), '9ff3b71c849749dbaec4ccdd3cbab62b')
self.assertIsNone(csr.key2('US'))
self.assertEqual(csr.clusters('US'), ['DFW1'])
self.assertEqual(
csr.endpoint('US', 'DFW1'), 'http://dfw1.host/v1/')
self.assertEqual(csr.key('UK'), 'e9569809dc8b4951accc1487aa788012')
self.assertEqual(
csr.key2('UK'), 'f6351bd1cc36413baa43f7ba1b45e51d')
self.assertEqual(csr.clusters('UK'), ['LON3'])
self.assertEqual(
csr.endpoint('UK', 'LON3'), 'http://lon3.host/v1/')
def test_empty_realm(self):
fname = 'container-sync-realms.conf'
fcontents = '''
[US]
'''
with temptree([fname], [fcontents]) as tempdir:
logger = debug_logger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
self.assertEqual(logger.all_log_lines(), {})
self.assertEqual(csr.mtime_check_interval, 300)
self.assertEqual(csr.realms(), ['US'])
self.assertIsNone(csr.key('US'))
self.assertIsNone(csr.key2('US'))
self.assertEqual(csr.clusters('US'), [])
self.assertIsNone(csr.endpoint('US', 'JUST_TESTING'))
def test_bad_mtime_check_interval(self):
fname = 'container-sync-realms.conf'
fcontents = '''
[DEFAULT]
mtime_check_interval = invalid
'''
with temptree([fname], [fcontents]) as tempdir:
logger = debug_logger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
logs = logger.all_log_lines()
self.assertEqual(logs, {'error': [ANY]})
line = logs['error'][0]
self.assertIn(
"Error in '%s' with mtime_check_interval: "
"could not convert string to float:" % fpath, line)
self.assertEqual(csr.mtime_check_interval, 300)
def test_get_sig(self):
fname = 'container-sync-realms.conf'
fcontents = ''
with temptree([fname], [fcontents]) as tempdir:
logger = debug_logger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
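            # the expected value is a 40-hex-char HMAC-SHA1 digest computed
            # from the realm key, user key and the request elements passed
            # above (method, path, timestamp, nonce)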
self.assertEqual(
csr.get_sig(
'GET', '/some/path', '1387212345.67890', 'my_nonce',
'realm_key', 'user_key'),
'5a6eb486eb7b44ae1b1f014187a94529c3f9c8f9')
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/test_container_sync_realms.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
import socket
from eventlet import spawn, Timeout
from swift.common import bufferedhttp
from test import listen_zero
class MockHTTPSConnection(object):
def __init__(self, hostport):
pass
def putrequest(self, method, path, skip_host=0):
        self.path = path
def putheader(self, header, *values):
# Verify that path and values can be safely joined
# Essentially what Python 2.7 does that caused us problems.
'\r\n\t'.join((self.path,) + values)
def endheaders(self):
pass
class TestBufferedHTTP(unittest.TestCase):
def test_http_connect(self):
bindsock = listen_zero()
def accept(expected_par):
try:
with Timeout(3):
sock, addr = bindsock.accept()
fp = sock.makefile('rwb')
fp.write(b'HTTP/1.1 200 OK\r\nContent-Length: 8\r\n\r\n'
b'RESPONSE')
fp.flush()
line = fp.readline()
path = (b'/dev/' + expected_par +
b'/path/..%25/?omg=&no=%7F&%FF=%FF&no=%25ff')
self.assertEqual(
line,
b'PUT ' + path + b' HTTP/1.1\r\n')
headers = {}
line = fp.readline()
while line and line != b'\r\n':
headers[line.split(b':')[0].lower()] = \
line.split(b':')[1].strip()
line = fp.readline()
self.assertEqual(headers[b'content-length'], b'7')
self.assertEqual(headers[b'x-header'], b'value')
self.assertEqual(fp.readline(), b'REQUEST\r\n')
except BaseException as err:
return err
return None
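        # each pair is (expected percent-encoded partition as it appears on
        # the wire, raw partition value passed to http_connect); unicode,
        # UTF-8 bytes and ints should all end up quoted the same way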
for spawn_par, par in (
(b'par', b'par'), (b'up%C3%A5r', u'up\xe5r'),
(b'%C3%BCpar', b'\xc3\xbcpar'), (b'1357', 1357)):
event = spawn(accept, spawn_par)
try:
with Timeout(3):
conn = bufferedhttp.http_connect(
'127.0.0.1', bindsock.getsockname()[1], 'dev', par,
'PUT', '/path/..%/', {
'content-length': 7,
'x-header': 'value'},
query_string='omg&no=%7f&\xff=%ff&no=%25ff')
conn.send(b'REQUEST\r\n')
self.assertTrue(conn.sock.getsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY))
resp = conn.getresponse()
body = resp.read()
conn.close()
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertEqual(body, b'RESPONSE')
finally:
err = event.wait()
if err:
raise Exception(err)
def test_get_expect(self):
bindsock = listen_zero()
request = []
def accept():
with Timeout(3):
sock, addr = bindsock.accept()
fp = sock.makefile('rwb')
request.append(fp.readline())
fp.write(b'HTTP/1.1 100 Continue\r\n\r\n')
fp.flush()
fp.write(b'HTTP/1.1 200 OK\r\nContent-Length: 8\r\n\r\n'
b'RESPONSE')
fp.flush()
server = spawn(accept)
try:
address = '%s:%s' % ('127.0.0.1', bindsock.getsockname()[1])
conn = bufferedhttp.BufferedHTTPConnection(address)
conn.putrequest('GET', '/path')
conn.endheaders()
resp = conn.getexpect()
self.assertIsInstance(resp, bufferedhttp.BufferedHTTPResponse)
self.assertEqual(resp.status, 100)
self.assertEqual(resp.version, 11)
self.assertEqual(resp.reason, 'Continue')
# I don't think you're supposed to "read" a continue response
self.assertRaises(AssertionError, resp.read)
resp = conn.getresponse()
self.assertIsInstance(resp, bufferedhttp.BufferedHTTPResponse)
self.assertEqual(resp.read(), b'RESPONSE')
finally:
server.wait()
self.assertEqual(request[0], b'GET /path HTTP/1.1\r\n')
def test_closed_response(self):
resp = bufferedhttp.BufferedHTTPResponse(None)
self.assertEqual(resp.status, 'UNKNOWN')
self.assertEqual(resp.version, 'UNKNOWN')
self.assertEqual(resp.reason, 'UNKNOWN')
self.assertEqual(resp.read(), b'')
def test_nonstr_header_values(self):
with mock.patch('swift.common.bufferedhttp.HTTPSConnection',
MockHTTPSConnection):
bufferedhttp.http_connect(
'127.0.0.1', 8080, 'sda', 1, 'GET', '/',
headers={'x-one': '1', 'x-two': 2, 'x-three': 3.0,
'x-four': {'crazy': 'value'}}, ssl=True)
bufferedhttp.http_connect_raw(
'127.0.0.1', 8080, 'GET', '/',
headers={'x-one': '1', 'x-two': 2, 'x-three': 3.0,
'x-four': {'crazy': 'value'}}, ssl=True)
def test_unicode_values(self):
with mock.patch('swift.common.bufferedhttp.HTTPSConnection',
MockHTTPSConnection):
for dev in ('sda', u'sda', u'sdá', u'sdá'.encode('utf-8')):
for path in (
'/v1/a', u'/v1/a', u'/v1/á', u'/v1/á'.encode('utf-8')):
for header in ('abc', u'abc', u'ábc'.encode('utf-8')):
try:
bufferedhttp.http_connect(
'127.0.0.1', 8080, dev, 1, 'GET', path,
headers={'X-Container-Meta-Whatever': header},
ssl=True)
except Exception as e:
self.fail(
'Exception %r for device=%r path=%r header=%r'
% (e, dev, path, header))
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/test_bufferedhttp.py |
# -*- coding:utf-8 -*-
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.utils"""
import itertools
from collections import defaultdict
import errno
import io
import logging
import six
import socket
import time
import unittest
import os
import mock
from six.moves.configparser import NoSectionError, NoOptionError
from eventlet import GreenPool, sleep, Queue
from eventlet.pools import Pool
from eventlet.green import ssl
from swift.common import memcached
from swift.common.memcached import MemcacheConnectionError, md5hash, \
MemcacheCommand
from swift.common.utils import md5, human_readable
from mock import patch, MagicMock
from test.debug_logger import debug_logger
class MockedMemcachePool(memcached.MemcacheConnPool):
def __init__(self, mocks):
Pool.__init__(self, max_size=2)
self.mocks = mocks
# setting this for the eventlet workaround in the MemcacheConnPool
self._parent_class_getter = super(memcached.MemcacheConnPool, self).get
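    # create() hands out the pre-canned (fp, sock) pairs supplied to
    # __init__, one per connection the pool is asked to create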
def create(self):
return self.mocks.pop(0)
class ExplodingMockMemcached(object):
should_explode = True
exploded = False
def sendall(self, string):
if self.should_explode:
self.exploded = True
raise socket.error(errno.EPIPE, os.strerror(errno.EPIPE))
def readline(self):
if self.should_explode:
self.exploded = True
raise socket.error(errno.EPIPE, os.strerror(errno.EPIPE))
return b'STORED\r\n'
def read(self, size):
if self.should_explode:
self.exploded = True
raise socket.error(errno.EPIPE, os.strerror(errno.EPIPE))
def close(self):
pass
TOO_BIG_KEY = md5(
b'too-big', usedforsecurity=False).hexdigest().encode('ascii')
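# MockMemcached.handle_set answers this key with "SERVER_ERROR object too
# large for cache", so tests can exercise the over-sized-value error path.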
class MockMemcached(object):
# See https://github.com/memcached/memcached/blob/master/doc/protocol.txt
# In particular, the "Storage commands" section may be interesting.
def __init__(self):
self.inbuf = b''
self.outbuf = b''
self.cache = {}
self.down = False
self.exc_on_delete = False
self.read_return_none = False
self.read_return_empty_str = False
self.close_called = False
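    # sendall() buffers whatever the client writes and, for every complete
    # command line, dispatches to the matching handle_<command>() method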
def sendall(self, string):
if self.down:
raise Exception('mock is down')
self.inbuf += string
while b'\n' in self.inbuf:
cmd, self.inbuf = self.inbuf.split(b'\n', 1)
parts = cmd.split()
cmd_name = parts[0].decode('ascii').lower()
handler = getattr(self, 'handle_%s' % cmd_name, None)
if handler:
handler(*parts[1:])
else:
raise ValueError('Unhandled command: %s' % parts[0])
def handle_set(self, key, flags, exptime, num_bytes, noreply=b''):
self.cache[key] = flags, exptime, self.inbuf[:int(num_bytes)]
self.inbuf = self.inbuf[int(num_bytes) + 2:]
if noreply != b'noreply':
if key == TOO_BIG_KEY:
self.outbuf += b'SERVER_ERROR object too large for cache\r\n'
else:
self.outbuf += b'STORED\r\n'
def handle_add(self, key, flags, exptime, num_bytes, noreply=b''):
value = self.inbuf[:int(num_bytes)]
self.inbuf = self.inbuf[int(num_bytes) + 2:]
if key in self.cache:
if noreply != b'noreply':
self.outbuf += b'NOT_STORED\r\n'
else:
self.cache[key] = flags, exptime, value
if noreply != b'noreply':
self.outbuf += b'STORED\r\n'
def handle_delete(self, key, noreply=b''):
if self.exc_on_delete:
            raise Exception('mock has exc_on_delete set')
if key in self.cache:
del self.cache[key]
if noreply != b'noreply':
self.outbuf += b'DELETED\r\n'
elif noreply != b'noreply':
self.outbuf += b'NOT_FOUND\r\n'
def handle_get(self, *keys):
for key in keys:
if key in self.cache:
val = self.cache[key]
self.outbuf += b' '.join([
b'VALUE',
key,
val[0],
str(len(val[2])).encode('ascii')
]) + b'\r\n'
self.outbuf += val[2] + b'\r\n'
self.outbuf += b'END\r\n'
def handle_incr(self, key, value, noreply=b''):
if key in self.cache:
current = self.cache[key][2]
new_val = str(int(current) + int(value)).encode('ascii')
self.cache[key] = self.cache[key][:2] + (new_val, )
self.outbuf += new_val + b'\r\n'
else:
self.outbuf += b'NOT_FOUND\r\n'
def handle_decr(self, key, value, noreply=b''):
if key in self.cache:
current = self.cache[key][2]
new_val = str(int(current) - int(value)).encode('ascii')
if new_val[:1] == b'-': # ie, val is negative
new_val = b'0'
self.cache[key] = self.cache[key][:2] + (new_val, )
self.outbuf += new_val + b'\r\n'
else:
self.outbuf += b'NOT_FOUND\r\n'
def readline(self):
if self.read_return_empty_str:
return b''
if self.read_return_none:
return None
if self.down:
raise Exception('mock is down')
if b'\n' in self.outbuf:
response, self.outbuf = self.outbuf.split(b'\n', 1)
return response + b'\n'
def read(self, size):
if self.down:
raise Exception('mock is down')
if len(self.outbuf) >= size:
response = self.outbuf[:size]
self.outbuf = self.outbuf[size:]
return response
def close(self):
        self.close_called = True
class TestMemcacheCommand(unittest.TestCase):
def test_init(self):
cmd = MemcacheCommand("set", "shard-updating-v2/a/c")
self.assertEqual(cmd.method, "set")
self.assertEqual(cmd.command, b"set")
self.assertEqual(cmd.key, "shard-updating-v2/a/c")
self.assertEqual(cmd.key_prefix, "shard-updating-v2/a")
self.assertEqual(cmd.hash_key, md5hash("shard-updating-v2/a/c"))
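    # key_prefix drops the final path component, e.g. the container part of
    # 'shard-updating-v2/a/c'; it is what appears in the
    # "with key_prefix ..." error log lines asserted elsewhere in this file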
def test_get_key_prefix(self):
cmd = MemcacheCommand("set", "shard-updating-v2/a/c")
self.assertEqual(cmd.key_prefix, "shard-updating-v2/a")
cmd = MemcacheCommand("set", "shard-listing-v2/accout/container3")
self.assertEqual(cmd.key_prefix, "shard-listing-v2/accout")
cmd = MemcacheCommand(
"set", "auth_reseller_name/token/X58E34EL2SDFLEY3")
self.assertEqual(cmd.key_prefix, "auth_reseller_name/token")
cmd = MemcacheCommand("set", "nvratelimit/v2/wf/2345392374")
self.assertEqual(cmd.key_prefix, "nvratelimit/v2/wf")
cmd = MemcacheCommand("set", "some_key")
self.assertEqual(cmd.key_prefix, "some_key")
class TestMemcached(unittest.TestCase):
"""Tests for swift.common.memcached"""
def setUp(self):
self.logger = debug_logger()
self.set_cmd = MemcacheCommand('set', 'key')
def test_logger_kwarg(self):
server_socket = '%s:%s' % ('[::1]', 11211)
client = memcached.MemcacheRing([server_socket])
self.assertIs(client.logger, logging.getLogger())
client = memcached.MemcacheRing([server_socket], logger=self.logger)
self.assertIs(client.logger, self.logger)
def test_tls_context_kwarg(self):
with patch('swift.common.memcached.socket.socket'):
server = '%s:%s' % ('[::1]', 11211)
client = memcached.MemcacheRing([server])
self.assertIsNone(client._client_cache[server]._tls_context)
context = mock.Mock()
client = memcached.MemcacheRing([server], tls_context=context)
self.assertIs(client._client_cache[server]._tls_context, context)
list(client._get_conns(self.set_cmd))
context.wrap_socket.assert_called_once()
def test_get_conns(self):
sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock1.bind(('127.0.0.1', 0))
sock1.listen(1)
sock1ipport = '%s:%s' % sock1.getsockname()
sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock2.bind(('127.0.0.1', 0))
sock2.listen(1)
orig_port = memcached.DEFAULT_MEMCACHED_PORT
try:
sock2ip, memcached.DEFAULT_MEMCACHED_PORT = sock2.getsockname()
sock2ipport = '%s:%s' % (sock2ip, memcached.DEFAULT_MEMCACHED_PORT)
# We're deliberately using sock2ip (no port) here to test that the
# default port is used.
memcache_client = memcached.MemcacheRing([sock1ipport, sock2ip],
logger=self.logger)
one = two = True
while one or two: # Run until we match hosts one and two
for conn in memcache_client._get_conns(self.set_cmd):
if 'b' not in getattr(conn[1], 'mode', ''):
self.assertIsInstance(conn[1], (
io.RawIOBase, io.BufferedIOBase))
peeripport = '%s:%s' % conn[2].getpeername()
self.assertTrue(peeripport in (sock1ipport, sock2ipport))
if peeripport == sock1ipport:
one = False
if peeripport == sock2ipport:
two = False
self.assertEqual(len(memcache_client._errors[sock1ipport]), 0)
self.assertEqual(len(memcache_client._errors[sock2ip]), 0)
finally:
memcached.DEFAULT_MEMCACHED_PORT = orig_port
def test_get_conns_v6(self):
if not socket.has_ipv6:
return
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind(('::1', 0, 0, 0))
sock.listen(1)
sock_addr = sock.getsockname()
server_socket = '[%s]:%s' % (sock_addr[0], sock_addr[1])
memcache_client = memcached.MemcacheRing([server_socket],
logger=self.logger)
for conn in memcache_client._get_conns(self.set_cmd):
peer_sockaddr = conn[2].getpeername()
peer_socket = '[%s]:%s' % (peer_sockaddr[0], peer_sockaddr[1])
self.assertEqual(peer_socket, server_socket)
self.assertEqual(len(memcache_client._errors[server_socket]), 0)
finally:
sock.close()
def test_get_conns_v6_default(self):
if not socket.has_ipv6:
return
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind(('::1', 0))
sock.listen(1)
sock_addr = sock.getsockname()
server_socket = '[%s]:%s' % (sock_addr[0], sock_addr[1])
server_host = '[%s]' % sock_addr[0]
memcached.DEFAULT_MEMCACHED_PORT = sock_addr[1]
memcache_client = memcached.MemcacheRing([server_host],
logger=self.logger)
for conn in memcache_client._get_conns(self.set_cmd):
peer_sockaddr = conn[2].getpeername()
peer_socket = '[%s]:%s' % (peer_sockaddr[0], peer_sockaddr[1])
self.assertEqual(peer_socket, server_socket)
self.assertEqual(len(memcache_client._errors[server_host]), 0)
finally:
sock.close()
def test_get_conns_bad_v6(self):
with self.assertRaises(ValueError):
# IPv6 address with missing [] is invalid
server_socket = '%s:%s' % ('::1', 11211)
memcached.MemcacheRing([server_socket], logger=self.logger)
def test_get_conns_hostname(self):
with patch('swift.common.memcached.socket.getaddrinfo') as addrinfo:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('127.0.0.1', 0))
sock.listen(1)
sock_addr = sock.getsockname()
fqdn = socket.getfqdn()
server_socket = '%s:%s' % (fqdn, sock_addr[1])
addrinfo.return_value = [(socket.AF_INET,
socket.SOCK_STREAM, 0, '',
('127.0.0.1', sock_addr[1]))]
memcache_client = memcached.MemcacheRing([server_socket],
logger=self.logger)
for conn in memcache_client._get_conns(self.set_cmd):
peer_sockaddr = conn[2].getpeername()
peer_socket = '%s:%s' % (peer_sockaddr[0],
peer_sockaddr[1])
self.assertEqual(peer_socket,
'127.0.0.1:%d' % sock_addr[1])
self.assertEqual(len(memcache_client._errors[server_socket]),
0)
finally:
sock.close()
def test_get_conns_hostname6(self):
with patch('swift.common.memcached.socket.getaddrinfo') as addrinfo:
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind(('::1', 0))
sock.listen(1)
sock_addr = sock.getsockname()
fqdn = socket.getfqdn()
server_socket = '%s:%s' % (fqdn, sock_addr[1])
addrinfo.return_value = [(socket.AF_INET6,
socket.SOCK_STREAM, 0, '',
('::1', sock_addr[1]))]
memcache_client = memcached.MemcacheRing([server_socket],
logger=self.logger)
for conn in memcache_client._get_conns(self.set_cmd):
peer_sockaddr = conn[2].getpeername()
peer_socket = '[%s]:%s' % (peer_sockaddr[0],
peer_sockaddr[1])
self.assertEqual(peer_socket, '[::1]:%d' % sock_addr[1])
self.assertEqual(len(memcache_client._errors[server_socket]),
0)
finally:
sock.close()
def test_set_get_json(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
cache_key = md5(b'some_key',
usedforsecurity=False).hexdigest().encode('ascii')
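        # MemcacheRing stores values under the md5 hex digest of the key, so
        # that digest is what shows up in the mock's cache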
memcache_client.set('some_key', [1, 2, 3])
self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
# See JSON_FLAG
self.assertEqual(mock.cache, {cache_key: (b'2', b'0', b'[1, 2, 3]')})
memcache_client.set('some_key', [4, 5, 6])
self.assertEqual(memcache_client.get('some_key'), [4, 5, 6])
self.assertEqual(mock.cache, {cache_key: (b'2', b'0', b'[4, 5, 6]')})
memcache_client.set('some_key', ['simple str', 'utf8 str éà'])
# As per http://wiki.openstack.org/encoding,
# we should expect to have unicode
self.assertEqual(
memcache_client.get('some_key'), ['simple str', u'utf8 str éà'])
self.assertEqual(mock.cache, {cache_key: (
b'2', b'0', b'["simple str", "utf8 str \\u00e9\\u00e0"]')})
memcache_client.set('some_key', [1, 2, 3], time=20)
self.assertEqual(mock.cache, {cache_key: (b'2', b'20', b'[1, 2, 3]')})
sixtydays = 60 * 24 * 60 * 60
esttimeout = time.time() + sixtydays
memcache_client.set('some_key', [1, 2, 3], time=sixtydays)
_junk, cache_timeout, _junk = mock.cache[cache_key]
self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1)
def test_set_error(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
memcache_client.set('too-big', [1, 2, 3])
self.assertEqual(
self.logger.get_lines_for_level('error'),
['Error setting value in memcached: 1.2.3.4:11211: '
'SERVER_ERROR object too large for cache'])
def test_get_failed_connection_mid_request(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
memcache_client.set('some_key', [1, 2, 3])
self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
self.assertEqual(list(mock.cache.values()),
[(b'2', b'0', b'[1, 2, 3]')])
        # Now let's return an empty string, and make sure we aren't logging
        # the error.
fake_stdout = six.StringIO()
# force the logging through the DebugLogger instead of the nose
# handler. This will use stdout, so we can assert that no stack trace
# is logged.
with patch("sys.stdout", fake_stdout):
mock.read_return_empty_str = True
self.assertIsNone(memcache_client.get('some_key'))
log_lines = self.logger.get_lines_for_level('error')
self.assertIn('Error talking to memcached', log_lines[0])
self.assertFalse(log_lines[1:])
self.assertNotIn("Traceback", fake_stdout.getvalue())
def test_incr(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
self.assertEqual(memcache_client.incr('some_key', delta=5), 5)
self.assertEqual(memcache_client.get('some_key'), b'5')
self.assertEqual(memcache_client.incr('some_key', delta=5), 10)
self.assertEqual(memcache_client.get('some_key'), b'10')
self.assertEqual(memcache_client.incr('some_key', delta=1), 11)
self.assertEqual(memcache_client.get('some_key'), b'11')
self.assertEqual(memcache_client.incr('some_key', delta=-5), 6)
self.assertEqual(memcache_client.get('some_key'), b'6')
self.assertEqual(memcache_client.incr('some_key', delta=-15), 0)
self.assertEqual(memcache_client.get('some_key'), b'0')
mock.read_return_none = True
self.assertRaises(memcached.MemcacheConnectionError,
memcache_client.incr, 'some_key', delta=-15)
self.assertTrue(mock.close_called)
def test_incr_failed_connection_mid_request(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
self.assertEqual(memcache_client.incr('some_key', delta=5), 5)
self.assertEqual(memcache_client.get('some_key'), b'5')
self.assertEqual(memcache_client.incr('some_key', delta=5), 10)
self.assertEqual(memcache_client.get('some_key'), b'10')
        # Now let's return an empty string, and make sure we aren't logging
        # the error.
fake_stdout = six.StringIO()
# force the logging through the DebugLogger instead of the nose
# handler. This will use stdout, so we can assert that no stack trace
# is logged.
with patch("sys.stdout", fake_stdout):
mock.read_return_empty_str = True
self.assertRaises(memcached.MemcacheConnectionError,
memcache_client.incr, 'some_key', delta=1)
log_lines = self.logger.get_lines_for_level('error')
self.assertIn('Error talking to memcached', log_lines[0])
self.assertFalse(log_lines[1:])
self.assertNotIn('Traceback', fake_stdout.getvalue())
def test_incr_w_timeout(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
cache_key = md5(b'some_key',
usedforsecurity=False).hexdigest().encode('ascii')
memcache_client.incr('some_key', delta=5, time=55)
self.assertEqual(memcache_client.get('some_key'), b'5')
self.assertEqual(mock.cache, {cache_key: (b'0', b'55', b'5')})
memcache_client.delete('some_key')
self.assertIsNone(memcache_client.get('some_key'))
fiftydays = 50 * 24 * 60 * 60
esttimeout = time.time() + fiftydays
memcache_client.incr('some_key', delta=5, time=fiftydays)
self.assertEqual(memcache_client.get('some_key'), b'5')
_junk, cache_timeout, _junk = mock.cache[cache_key]
self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1)
memcache_client.delete('some_key')
self.assertIsNone(memcache_client.get('some_key'))
memcache_client.incr('some_key', delta=5)
self.assertEqual(memcache_client.get('some_key'), b'5')
self.assertEqual(mock.cache, {cache_key: (b'0', b'0', b'5')})
memcache_client.incr('some_key', delta=5, time=55)
self.assertEqual(memcache_client.get('some_key'), b'10')
self.assertEqual(mock.cache, {cache_key: (b'0', b'0', b'10')})
def test_decr(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
self.assertEqual(memcache_client.decr('some_key', delta=5), 0)
self.assertEqual(memcache_client.get('some_key'), b'0')
self.assertEqual(memcache_client.incr('some_key', delta=15), 15)
self.assertEqual(memcache_client.get('some_key'), b'15')
self.assertEqual(memcache_client.decr('some_key', delta=4), 11)
self.assertEqual(memcache_client.get('some_key'), b'11')
self.assertEqual(memcache_client.decr('some_key', delta=15), 0)
self.assertEqual(memcache_client.get('some_key'), b'0')
mock.read_return_none = True
self.assertRaises(memcached.MemcacheConnectionError,
memcache_client.decr, 'some_key', delta=15)
def test_retry(self):
memcache_client = memcached.MemcacheRing(
['1.2.3.4:11211', '1.2.3.5:11211'], logger=self.logger)
mock1 = ExplodingMockMemcached()
mock2 = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock2, mock2)])
memcache_client._client_cache['1.2.3.5:11211'] = MockedMemcachePool(
[(mock1, mock1), (mock1, mock1)])
now = time.time()
with patch('time.time', return_value=now):
memcache_client.set('some_key', [1, 2, 3])
self.assertEqual(mock1.exploded, True)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.5:11211: '
'with key_prefix some_key, method set, time_spent 0.0, '
'[Errno 32] Broken pipe',
])
self.logger.clear()
mock1.exploded = False
now = time.time()
with patch('time.time', return_value=now):
self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
self.assertEqual(mock1.exploded, True)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.5:11211: '
'with key_prefix some_key, method get, time_spent 0.0, '
'[Errno 32] Broken pipe',
])
# Check that we really did call create() twice
self.assertEqual(memcache_client._client_cache['1.2.3.5:11211'].mocks,
[])
def test_error_limiting(self):
memcache_client = memcached.MemcacheRing(
['1.2.3.4:11211', '1.2.3.5:11211'], logger=self.logger)
mock1 = ExplodingMockMemcached()
mock2 = ExplodingMockMemcached()
mock2.should_explode = False
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock2, mock2)] * 12)
memcache_client._client_cache['1.2.3.5:11211'] = MockedMemcachePool(
[(mock1, mock1)] * 12)
now = time.time()
with patch('time.time', return_value=now):
for _ in range(12):
memcache_client.set('some_key', [1, 2, 3])
# twelfth one skips .5 because of error limiting and goes straight
# to .4
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.5:11211: '
'with key_prefix some_key, method set, time_spent 0.0, '
'[Errno 32] Broken pipe',
] * 11 + [
'Error limiting server 1.2.3.5:11211'
])
self.logger.clear()
mock2.should_explode = True
now = time.time()
with patch('time.time', return_value=now):
for _ in range(12):
memcache_client.set('some_key', [1, 2, 3])
# as we keep going, eventually .4 gets error limited, too
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.4:11211: '
'with key_prefix some_key, method set, time_spent 0.0, '
'[Errno 32] Broken pipe',
] * 10 + [
'Error talking to memcached: 1.2.3.4:11211: '
'with key_prefix some_key, method set, time_spent 0.0, '
'[Errno 32] Broken pipe',
'Error limiting server 1.2.3.4:11211',
'All memcached servers error-limited',
])
self.logger.clear()
# continued requests just keep bypassing memcache
for _ in range(12):
memcache_client.set('some_key', [1, 2, 3])
self.assertEqual(self.logger.get_lines_for_level('error'), [
'All memcached servers error-limited',
] * 12)
self.logger.clear()
# and get()s are all a "cache miss"
self.assertIsNone(memcache_client.get('some_key'))
self.assertEqual(self.logger.get_lines_for_level('error'), [
'All memcached servers error-limited',
])
def test_error_disabled(self):
memcache_client = memcached.MemcacheRing(
['1.2.3.4:11211'], logger=self.logger, error_limit_time=0)
mock1 = ExplodingMockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock1, mock1)] * 20)
now = time.time()
with patch('time.time', return_value=now):
for _ in range(20):
memcache_client.set('some_key', [1, 2, 3])
        # error limiting is disabled, so all 20 calls reach the server and
        # each one logs an error
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.4:11211: '
'with key_prefix some_key, method set, time_spent 0.0, '
'[Errno 32] Broken pipe',
] * 20)
def test_error_raising(self):
memcache_client = memcached.MemcacheRing(
['1.2.3.4:11211'], logger=self.logger, error_limit_time=0)
mock1 = ExplodingMockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock1, mock1)] * 20)
# expect exception when requested...
now = time.time()
with patch('time.time', return_value=now):
with self.assertRaises(MemcacheConnectionError):
memcache_client.set('some_key', [1, 2, 3], raise_on_error=True)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.4:11211: '
'with key_prefix some_key, method set, time_spent 0.0, '
'[Errno 32] Broken pipe',
])
self.logger.clear()
with patch('time.time', return_value=now):
with self.assertRaises(MemcacheConnectionError):
memcache_client.get('some_key', raise_on_error=True)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.4:11211: '
'with key_prefix some_key, method get, time_spent 0.0, '
'[Errno 32] Broken pipe',
])
self.logger.clear()
with patch('time.time', return_value=now):
with self.assertRaises(MemcacheConnectionError):
memcache_client.set(
'shard-updating-v2/acc/container', [1, 2, 3],
raise_on_error=True)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.4:11211: '
'with key_prefix shard-updating-v2/acc, method set, '
'time_spent 0.0, [Errno 32] Broken pipe',
])
self.logger.clear()
# ...but default is no exception
with patch('time.time', return_value=now):
memcache_client.set('some_key', [1, 2, 3])
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.4:11211: '
'with key_prefix some_key, method set, time_spent 0.0, '
'[Errno 32] Broken pipe',
])
self.logger.clear()
with patch('time.time', return_value=now):
memcache_client.get('some_key')
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.4:11211: '
'with key_prefix some_key, method get, time_spent 0.0, '
'[Errno 32] Broken pipe',
])
self.logger.clear()
with patch('time.time', return_value=now):
memcache_client.set('shard-updating-v2/acc/container', [1, 2, 3])
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.4:11211: '
'with key_prefix shard-updating-v2/acc, method set, '
'time_spent 0.0, [Errno 32] Broken pipe',
])
def test_error_limiting_custom_config(self):
def do_calls(time_step, num_calls, **memcache_kwargs):
self.logger.clear()
memcache_client = memcached.MemcacheRing(
['1.2.3.5:11211'], logger=self.logger,
**memcache_kwargs)
mock1 = ExplodingMockMemcached()
memcache_client._client_cache['1.2.3.5:11211'] = \
MockedMemcachePool([(mock1, mock1)] * num_calls)
for n in range(num_calls):
with mock.patch.object(memcached.tm, 'time',
return_value=time_step * n):
memcache_client.set('some_key', [1, 2, 3])
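        # Worked example of the window arithmetic assumed by the scenarios
        # below: with one error every 5 seconds, the 11th error at t=50 still
        # has the previous 10 inside a 60-second window, so the server gets
        # error limited; with one error every 6 seconds the oldest error has
        # aged out of the window by the time the 11th arrives, so it does not.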
# with default error_limit_time of 60, one call per 5 secs, twelfth one
# triggers error limit
do_calls(5.0, 12)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.5:11211: '
'with key_prefix some_key, method set, time_spent 0.0, '
'[Errno 32] Broken pipe',
] * 10 + [
'Error talking to memcached: 1.2.3.5:11211: '
'with key_prefix some_key, method set, time_spent 0.0, '
'[Errno 32] Broken pipe',
'Error limiting server 1.2.3.5:11211',
'All memcached servers error-limited',
])
# with default error_limit_time of 60, one call per 6 secs, error limit
# is not triggered
do_calls(6.0, 20)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.5:11211: '
'with key_prefix some_key, method set, time_spent 0.0, '
'[Errno 32] Broken pipe',
] * 20)
# with error_limit_time of 66, one call per 6 secs, twelfth one
# triggers error limit
do_calls(6.0, 12, error_limit_time=66)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.5:11211: '
'with key_prefix some_key, method set, time_spent 0.0, '
'[Errno 32] Broken pipe',
] * 10 + [
'Error talking to memcached: 1.2.3.5:11211: '
'with key_prefix some_key, method set, time_spent 0.0, '
'[Errno 32] Broken pipe',
'Error limiting server 1.2.3.5:11211',
'All memcached servers error-limited',
])
# with error_limit_time of 70, one call per 6 secs, error_limit_count
# of 11, 13th call triggers error limit
do_calls(6.0, 13, error_limit_time=70, error_limit_count=11)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.5:11211: '
'with key_prefix some_key, method set, time_spent 0.0, '
'[Errno 32] Broken pipe',
] * 11 + [
'Error talking to memcached: 1.2.3.5:11211: '
'with key_prefix some_key, method set, time_spent 0.0, '
'[Errno 32] Broken pipe',
'Error limiting server 1.2.3.5:11211',
'All memcached servers error-limited',
])
def test_delete(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
memcache_client.set('some_key', [1, 2, 3])
self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
memcache_client.delete('some_key')
self.assertIsNone(memcache_client.get('some_key'))
def test_multi(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
memcache_client.set_multi(
{'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key')
self.assertEqual(
memcache_client.get_multi(('some_key2', 'some_key1'), 'multi_key'),
[[4, 5, 6], [1, 2, 3]])
for key in (b'some_key1', b'some_key2'):
key = md5(key, usedforsecurity=False).hexdigest().encode('ascii')
self.assertIn(key, mock.cache)
_junk, cache_timeout, _junk = mock.cache[key]
self.assertEqual(cache_timeout, b'0')
memcache_client.set_multi(
{'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key',
time=20)
for key in (b'some_key1', b'some_key2'):
key = md5(key, usedforsecurity=False).hexdigest().encode('ascii')
_junk, cache_timeout, _junk = mock.cache[key]
self.assertEqual(cache_timeout, b'20')
        fiftydays = 50 * 24 * 60 * 60
        esttimeout = time.time() + fiftydays
        memcache_client.set_multi(
            {'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key',
            time=fiftydays)
for key in (b'some_key1', b'some_key2'):
key = md5(key, usedforsecurity=False).hexdigest().encode('ascii')
_junk, cache_timeout, _junk = mock.cache[key]
self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1)
self.assertEqual(memcache_client.get_multi(
('some_key2', 'some_key1', 'not_exists'), 'multi_key'),
[[4, 5, 6], [1, 2, 3], None])
        # Now let's simulate a lost connection and make sure we don't get
        # the index out of range stack trace when it does
mock_stderr = six.StringIO()
not_expected = "IndexError: list index out of range"
with patch("sys.stderr", mock_stderr):
mock.read_return_empty_str = True
self.assertEqual(memcache_client.get_multi(
('some_key2', 'some_key1', 'not_exists'), 'multi_key'),
None)
self.assertFalse(not_expected in mock_stderr.getvalue())
def test_multi_delete(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211',
'1.2.3.5:11211'],
logger=self.logger)
mock1 = MockMemcached()
mock2 = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock1, mock1)] * 2)
memcache_client._client_cache['1.2.3.5:11211'] = MockedMemcachePool(
[(mock2, mock2)] * 2)
# MemcacheRing will put 'some_key0' on server 1.2.3.5:11211 and
# 'some_key1' and 'multi_key' on '1.2.3.4:11211'
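        # (the placement is presumably decided by MemcacheRing's md5-based
        # hash ring over the configured servers, which is why these specific
        # key names end up on different backends)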
memcache_client.set_multi(
{'some_key0': [1, 2, 3], 'some_key1': [4, 5, 6]}, 'multi_key')
self.assertEqual(
memcache_client.get_multi(('some_key1', 'some_key0'), 'multi_key'),
[[4, 5, 6], [1, 2, 3]])
for key in (b'some_key0', b'some_key1'):
key = md5(key, usedforsecurity=False).hexdigest().encode('ascii')
self.assertIn(key, mock1.cache)
_junk, cache_timeout, _junk = mock1.cache[key]
self.assertEqual(cache_timeout, b'0')
memcache_client.set('some_key0', [7, 8, 9])
self.assertEqual(memcache_client.get('some_key0'), [7, 8, 9])
key = md5(b'some_key0',
usedforsecurity=False).hexdigest().encode('ascii')
self.assertIn(key, mock2.cache)
# Delete 'some_key0' with server_key='multi_key'
memcache_client.delete('some_key0', server_key='multi_key')
self.assertEqual(memcache_client.get_multi(
('some_key0', 'some_key1'), 'multi_key'),
[None, [4, 5, 6]])
        # 'some_key0' still has to be available on 1.2.3.5:11211
self.assertEqual(memcache_client.get('some_key0'), [7, 8, 9])
self.assertIn(key, mock2.cache)
def test_serialization(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
memcache_client.set('some_key', [1, 2, 3])
self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
self.assertEqual(len(mock.cache), 1)
key = next(iter(mock.cache))
self.assertEqual(mock.cache[key][0], b'2') # JSON_FLAG
# Pretend we've got some really old pickle data in there
mock.cache[key] = (b'1',) + mock.cache[key][1:]
self.assertIsNone(memcache_client.get('some_key'))
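    # The flag byte asserted above appears to identify the value encoding:
    # b'2' for JSON and b'1' for legacy pickle data, with pickled entries
    # deliberately returned as a cache miss instead of being unpickled.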
def test_connection_pooling(self):
with patch('swift.common.memcached.socket') as mock_module:
def mock_getaddrinfo(host, port, family=socket.AF_INET,
socktype=socket.SOCK_STREAM, proto=0,
flags=0):
return [(family, socktype, proto, '', (host, port))]
mock_module.getaddrinfo = mock_getaddrinfo
# patch socket, stub socket.socket, mock sock
mock_sock = mock_module.socket.return_value
# track clients waiting for connections
connected = []
connections = Queue()
errors = []
def wait_connect(addr):
connected.append(addr)
sleep(0.1) # yield
val = connections.get()
if val is not None:
errors.append(val)
mock_sock.connect = wait_connect
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
connect_timeout=10,
logger=self.logger)
# sanity
self.assertEqual(1, len(memcache_client._client_cache))
for server, pool in memcache_client._client_cache.items():
self.assertEqual(2, pool.max_size)
# make 10 requests "at the same time"
p = GreenPool()
for i in range(10):
p.spawn(memcache_client.set, 'key', 'value')
for i in range(3):
sleep(0.1)
self.assertEqual(2, len(connected))
# give out a connection
connections.put(None)
# at this point, only one connection should have actually been
# created, the other is in the creation step, and the rest of the
# clients are not attempting to connect. we let this play out a
# bit to verify.
for i in range(3):
sleep(0.1)
self.assertEqual(2, len(connected))
# finish up, this allows the final connection to be created, so
# that all the other clients can use the two existing connections
# and no others will be created.
connections.put(None)
connections.put('nono')
self.assertEqual(2, len(connected))
p.waitall()
self.assertEqual(2, len(connected))
self.assertEqual(0, len(errors),
"A client was allowed a third connection")
connections.get_nowait()
self.assertTrue(connections.empty())
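    # The cap of two concurrent connections observed above matches the pool
    # max_size asserted at the start of the test; presumably MemcacheConnPool
    # makes additional greenthreads wait for an existing connection rather
    # than opening a third socket.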
def test_connection_pool_timeout(self):
connections = defaultdict(Queue)
pending = defaultdict(int)
served = defaultdict(int)
class MockConnectionPool(memcached.MemcacheConnPool):
def get(self):
pending[self.host] += 1
conn = connections[self.host].get()
pending[self.host] -= 1
return conn
def put(self, *args, **kwargs):
connections[self.host].put(*args, **kwargs)
served[self.host] += 1
with mock.patch.object(memcached, 'MemcacheConnPool',
MockConnectionPool):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211',
'1.2.3.5:11211'],
io_timeout=0.5,
pool_timeout=0.1,
logger=self.logger)
# Hand out a couple slow connections to 1.2.3.5, leaving 1.2.3.4
# fast. All ten (10) clients should try to talk to .5 first, and
# then move on to .4, and we'll assert all that below.
mock_conn = MagicMock(), MagicMock()
mock_conn[0].readline = lambda: b'STORED\r\n'
mock_conn[1].sendall = lambda x: sleep(0.2)
connections['1.2.3.5'].put(mock_conn)
connections['1.2.3.5'].put(mock_conn)
mock_conn = MagicMock(), MagicMock()
mock_conn[0].readline = lambda: b'STORED\r\n'
connections['1.2.3.4'].put(mock_conn)
connections['1.2.3.4'].put(mock_conn)
p = GreenPool()
for i in range(10):
p.spawn(memcache_client.set, 'key', 'value')
# Wait for the dust to settle.
p.waitall()
self.assertEqual(pending['1.2.3.5'], 8)
self.assertEqual(len(memcache_client._errors['1.2.3.5:11211']), 8)
error_logs = self.logger.get_lines_for_level('error')
self.assertEqual(len(error_logs), 8)
for each_log in error_logs:
self.assertIn(
'Timeout getting a connection to memcached: 1.2.3.5:11211: '
'with key_prefix key',
each_log)
self.assertEqual(served['1.2.3.5'], 2)
self.assertEqual(pending['1.2.3.4'], 0)
self.assertEqual(len(memcache_client._errors['1.2.3.4:11211']), 0)
self.assertEqual(served['1.2.3.4'], 8)
            # and we never got more put in than we gave out
self.assertEqual(connections['1.2.3.5'].qsize(), 2)
self.assertEqual(connections['1.2.3.4'].qsize(), 2)
def test_connection_slow_connect(self):
with patch('swift.common.memcached.socket') as mock_module:
def mock_getaddrinfo(host, port, family=socket.AF_INET,
socktype=socket.SOCK_STREAM, proto=0,
flags=0):
return [(family, socktype, proto, '', (host, port))]
mock_module.getaddrinfo = mock_getaddrinfo
# patch socket, stub socket.socket, mock sock
mock_sock = mock_module.socket.return_value
def wait_connect(addr):
# slow connect gives Timeout Exception
sleep(1)
# patch connect method
mock_sock.connect = wait_connect
memcache_client = memcached.MemcacheRing(
['1.2.3.4:11211'], connect_timeout=0.1, logger=self.logger)
# sanity
self.assertEqual(1, len(memcache_client._client_cache))
for server, pool in memcache_client._client_cache.items():
self.assertEqual(2, pool.max_size)
# try to get connect and no connection found
# so it will result in StopIteration
conn_generator = memcache_client._get_conns(self.set_cmd)
with self.assertRaises(StopIteration):
next(conn_generator)
self.assertEqual(1, mock_sock.close.call_count)
def test_item_size_warning_threshold(self):
mock = MockMemcached()
mocked_pool = MockedMemcachePool([(mock, mock)] * 2)
def do_test(d, threshold, should_warn, error=False):
self.logger.clear()
try:
memcache_client = memcached.MemcacheRing(
['1.2.3.4:11211'], item_size_warning_threshold=threshold,
logger=self.logger)
memcache_client._client_cache['1.2.3.4:11211'] = mocked_pool
memcache_client.set('some_key', d, serialize=False)
warning_lines = self.logger.get_lines_for_level('warning')
if should_warn:
self.assertIn(
'Item size larger than warning threshold: '
'%d (%s) >= %d (%s)' % (
len(d), human_readable(len(d)), threshold,
human_readable(threshold)),
warning_lines[0])
else:
self.assertFalse(warning_lines)
except ValueError as err:
                if not error:
self.fail(err)
else:
self.assertIn(
'Config option must be a number, greater than 0, '
'less than 100, not "%s".' % threshold,
str(err))
data = '1' * 100
# let's start with something easy, say warning at 80
for data_size, warn in ((79, False), (80, True), (81, True),
(99, True), (100, True)):
do_test(data[:data_size], 80, warn)
        # setting the threshold to -1 turns off the warning
for data_size, warn in ((79, False), (80, False), (81, False),
(99, False), (100, False)):
do_test(data[:data_size], -1, warn)
# Changing to 0 should warn on everything
for data_size, warn in ((0, True), (1, True), (50, True),
(99, True), (100, True)):
do_test(data[:data_size], 0, warn)
# Let's do a big number
do_test('1' * 2048576, 1000000, True)
def test_operations_timing_stats(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
with patch('time.time',) as mock_time:
mock_time.return_value = 1000.99
memcache_client.set('some_key', [1, 2, 3])
last_stats = self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.set.timing', last_stats[0][0])
self.assertEqual(last_stats[0][1], 1000.99)
mock_time.return_value = 2000.99
self.assertEqual(memcache_client.get('some_key'), [1, 2, 3])
last_stats = self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.get.timing', last_stats[0][0])
self.assertEqual(last_stats[0][1], 2000.99)
mock_time.return_value = 3000.99
self.assertEqual(memcache_client.decr('decr_key', delta=5), 0)
last_stats = self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.decr.timing', last_stats[0][0])
self.assertEqual(last_stats[0][1], 3000.99)
mock_time.return_value = 4000.99
self.assertEqual(memcache_client.incr('decr_key', delta=5), 5)
last_stats = self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.incr.timing', last_stats[0][0])
self.assertEqual(last_stats[0][1], 4000.99)
mock_time.return_value = 5000.99
memcache_client.set_multi(
{'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key')
last_stats = self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.set_multi.timing', last_stats[0][0])
self.assertEqual(last_stats[0][1], 5000.99)
mock_time.return_value = 6000.99
self.assertEqual(
memcache_client.get_multi(
('some_key2', 'some_key1'),
'multi_key'),
[[4, 5, 6],
[1, 2, 3]])
last_stats = self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.get_multi.timing', last_stats[0][0])
self.assertEqual(last_stats[0][1], 6000.99)
mock_time.return_value = 7000.99
memcache_client.delete('some_key')
last_stats = self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.delete.timing', last_stats[0][0])
self.assertEqual(last_stats[0][1], 7000.99)
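    # The statsd metric names above and in the tests below appear to encode
    # the outcome as well as the operation: 'memcached.<op>.timing' on
    # success, with '.errors.', '.conn_err.' and '.timeout.' variants when the
    # call raises, hits a connection error, or exceeds io_timeout.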
def test_operations_timing_stats_with_incr_exception(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock_memcache = MockMemcached()
memcache_client._client_cache[
'1.2.3.4:11211'] = MockedMemcachePool(
[(mock_memcache, mock_memcache)] * 2)
def handle_add(key, flags, exptime, num_bytes, noreply=b''):
raise Exception('add failed')
with patch('time.time', ) as mock_time:
with mock.patch.object(mock_memcache, 'handle_add', handle_add):
mock_time.return_value = 4000.99
with self.assertRaises(MemcacheConnectionError):
memcache_client.incr('incr_key', delta=5)
self.assertTrue(
self.logger.statsd_client.calls['timing_since'])
last_stats = \
self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.incr.errors.timing',
last_stats[0][0])
self.assertEqual(last_stats[0][1], 4000.99)
self.assertEqual(
'Error talking to memcached: 1.2.3.4:11211: '
'with key_prefix incr_key, method incr, time_spent 0.0: ',
self.logger.get_lines_for_level('error')[0])
def test_operations_timing_stats_with_set_exception(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock_memcache = MockMemcached()
memcache_client._client_cache[
'1.2.3.4:11211'] = MockedMemcachePool(
[(mock_memcache, mock_memcache)] * 2)
def handle_set(key, flags, exptime, num_bytes, noreply=b''):
raise Exception('set failed')
with patch('time.time', ) as mock_time:
with mock.patch.object(mock_memcache, 'handle_set', handle_set):
mock_time.return_value = 4000.99
with self.assertRaises(MemcacheConnectionError):
memcache_client.set(
'set_key', [1, 2, 3],
raise_on_error=True)
self.assertTrue(
self.logger.statsd_client.calls['timing_since'])
last_stats = \
self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.set.errors.timing',
last_stats[0][0])
self.assertEqual(last_stats[0][1], 4000.99)
self.assertEqual(
'Error talking to memcached: 1.2.3.4:11211: '
'with key_prefix set_key, method set, time_spent 0.0: ',
self.logger.get_lines_for_level('error')[0])
def test_operations_timing_stats_with_get_exception(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock_memcache = MockMemcached()
memcache_client._client_cache[
'1.2.3.4:11211'] = MockedMemcachePool(
[(mock_memcache, mock_memcache)] * 2)
def handle_get(*keys):
raise Exception('get failed')
with patch('time.time', ) as mock_time:
with mock.patch.object(mock_memcache, 'handle_get', handle_get):
mock_time.return_value = 4000.99
with self.assertRaises(MemcacheConnectionError):
memcache_client.get('get_key', raise_on_error=True)
self.assertTrue(
self.logger.statsd_client.calls['timing_since'])
last_stats = \
self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.get.errors.timing',
last_stats[0][0])
self.assertEqual(last_stats[0][1], 4000.99)
self.assertEqual(
'Error talking to memcached: 1.2.3.4:11211: '
'with key_prefix get_key, method get, time_spent 0.0: ',
self.logger.get_lines_for_level('error')[0])
def test_operations_timing_stats_with_get_error(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
logger=self.logger)
mock_memcache = MockMemcached()
memcache_client._client_cache[
'1.2.3.4:11211'] = MockedMemcachePool(
[(mock_memcache, mock_memcache)] * 2)
def handle_get(*keys):
raise MemcacheConnectionError('failed to connect')
with patch('time.time', ) as mock_time:
with mock.patch.object(mock_memcache, 'handle_get', handle_get):
mock_time.return_value = 4000.99
with self.assertRaises(MemcacheConnectionError):
memcache_client.get('get_key', raise_on_error=True)
self.assertTrue(
self.logger.statsd_client.calls['timing_since'])
last_stats = \
self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.get.conn_err.timing',
last_stats[0][0])
self.assertEqual(last_stats[0][1], 4000.99)
self.assertEqual('Error talking to memcached: 1.2.3.4:11211: '
'with key_prefix get_key, method get, '
'time_spent 0.0, failed to connect',
self.logger.get_lines_for_level('error')[0])
def test_operations_timing_stats_with_incr_timeout(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
io_timeout=0.01,
logger=self.logger)
mock_memcache = MockMemcached()
memcache_client._client_cache[
'1.2.3.4:11211'] = MockedMemcachePool(
[(mock_memcache, mock_memcache)] * 2)
def handle_add(key, flags, exptime, num_bytes, noreply=b''):
sleep(0.05)
with patch('time.time', ) as mock_time:
with mock.patch.object(mock_memcache, 'handle_add', handle_add):
mock_time.side_effect = itertools.count(4000.99, 1.0)
with self.assertRaises(MemcacheConnectionError):
memcache_client.incr('nvratelimit/v2/wf/124593', delta=5)
self.assertTrue(
self.logger.statsd_client.calls['timing_since'])
last_stats = \
self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.incr.timeout.timing',
last_stats[0][0])
self.assertEqual(last_stats[0][1], 4002.99)
error_logs = self.logger.get_lines_for_level('error')
self.assertIn('Timeout talking to memcached: 1.2.3.4:11211: ',
error_logs[0])
self.assertIn(
'with key_prefix nvratelimit/v2/wf, ', error_logs[0])
self.assertIn('method incr, ', error_logs[0])
self.assertIn(
'config_timeout 0.01, time_spent 1.0', error_logs[0])
def test_operations_timing_stats_with_set_timeout(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
io_timeout=0.01,
logger=self.logger)
mock_memcache = MockMemcached()
memcache_client._client_cache[
'1.2.3.4:11211'] = MockedMemcachePool(
[(mock_memcache, mock_memcache)] * 2)
def handle_set(key, flags, exptime, num_bytes, noreply=b''):
sleep(0.05)
with patch('time.time', ) as mock_time:
with mock.patch.object(mock_memcache, 'handle_set', handle_set):
mock_time.side_effect = itertools.count(4000.99, 1.0)
with self.assertRaises(MemcacheConnectionError):
memcache_client.set(
'shard-updating-v2/acc/container', [1, 2, 3],
raise_on_error=True)
self.assertTrue(
self.logger.statsd_client.calls['timing_since'])
last_stats = \
self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.set.timeout.timing',
last_stats[0][0])
self.assertEqual(last_stats[0][1], 4002.99)
error_logs = self.logger.get_lines_for_level('error')
self.assertIn('Timeout talking to memcached: 1.2.3.4:11211: ',
error_logs[0])
self.assertIn(
'with key_prefix shard-updating-v2/acc, ', error_logs[0])
self.assertIn('method set, ', error_logs[0])
self.assertIn(
'config_timeout 0.01, time_spent 1.0', error_logs[0])
def test_operations_timing_stats_with_get_timeout(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
io_timeout=0.01,
logger=self.logger)
mock_memcache = MockMemcached()
memcache_client._client_cache[
'1.2.3.4:11211'] = MockedMemcachePool(
[(mock_memcache, mock_memcache)] * 2)
def handle_get(*keys):
sleep(0.05)
with patch('time.time', ) as mock_time:
with mock.patch.object(mock_memcache, 'handle_get', handle_get):
mock_time.side_effect = itertools.count(4000.99, 1.0)
with self.assertRaises(MemcacheConnectionError):
memcache_client.get(
'shard-updating-v2/acc/container', raise_on_error=True)
self.assertTrue(
self.logger.statsd_client.calls['timing_since'])
last_stats = \
self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.get.timeout.timing',
last_stats[0][0])
self.assertEqual(last_stats[0][1], 4002.99)
error_logs = self.logger.get_lines_for_level('error')
self.assertIn('Timeout talking to memcached: 1.2.3.4:11211: ',
error_logs[0])
self.assertIn(
'with key_prefix shard-updating-v2/acc, ', error_logs[0])
self.assertIn('method get, ', error_logs[0])
self.assertIn(
'config_timeout 0.01, time_spent 1.0', error_logs[0])
def test_incr_add_expires(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
io_timeout=0.01,
logger=self.logger)
mock_memcache = MockMemcached()
memcache_client._client_cache[
'1.2.3.4:11211'] = MockedMemcachePool(
[(mock_memcache, mock_memcache)] * 2)
incr_calls = []
orig_incr = mock_memcache.handle_incr
orig_add = mock_memcache.handle_add
def handle_incr(key, value, noreply=b''):
if incr_calls:
mock_memcache.cache.clear()
incr_calls.append(key)
orig_incr(key, value, noreply)
def handle_add(key, flags, exptime, num_bytes, noreply=b''):
mock_memcache.cache[key] = 'already set!'
orig_add(key, flags, exptime, num_bytes, noreply)
mock_memcache.cache.clear()
with patch('time.time', ) as mock_time:
mock_time.side_effect = itertools.count(4000.99, 1.0)
with mock.patch.object(mock_memcache, 'handle_incr', handle_incr):
with mock.patch.object(mock_memcache, 'handle_add',
handle_add):
with self.assertRaises(MemcacheConnectionError):
memcache_client.incr(
'shard-updating-v2/acc/container', time=1.23)
self.assertTrue(self.logger.statsd_client.calls['timing_since'])
last_stats = self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.incr.conn_err.timing',
last_stats[0][0])
self.assertEqual(last_stats[0][1], 4002.99)
error_logs = self.logger.get_lines_for_level('error')
self.assertIn('Error talking to memcached: 1.2.3.4:11211: ',
error_logs[0])
self.assertIn('with key_prefix shard-updating-v2/acc, method incr, '
'time_spent 1.0, expired ttl=1.23',
error_logs[0])
self.assertIn('1.2.3.4:11211', memcache_client._errors)
self.assertFalse(memcache_client._errors['1.2.3.4:11211'])
def test_incr_unexpected_response(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
io_timeout=0.01,
logger=self.logger)
mock_memcache = MockMemcached()
memcache_client._client_cache[
'1.2.3.4:11211'] = MockedMemcachePool(
[(mock_memcache, mock_memcache)] * 2)
resp = b'UNEXPECTED RESPONSE\r\n'
def handle_incr(key, value, noreply=b''):
mock_memcache.outbuf += resp
with patch('time.time') as mock_time:
mock_time.side_effect = itertools.count(4000.99, 1.0)
with mock.patch.object(mock_memcache, 'handle_incr', handle_incr):
with self.assertRaises(MemcacheConnectionError):
memcache_client.incr(
'shard-updating-v2/acc/container', time=1.23)
self.assertTrue(self.logger.statsd_client.calls['timing_since'])
last_stats = self.logger.statsd_client.calls['timing_since'][-1]
self.assertEqual('memcached.incr.errors.timing',
last_stats[0][0])
self.assertEqual(last_stats[0][1], 4002.99)
error_logs = self.logger.get_lines_for_level('error')
self.assertIn('Error talking to memcached: 1.2.3.4:11211: ',
error_logs[0])
self.assertIn("with key_prefix shard-updating-v2/acc, method incr, "
"time_spent 1.0" % resp.split(), error_logs[0])
self.assertIn('1.2.3.4:11211', memcache_client._errors)
self.assertEqual([4005.99], memcache_client._errors['1.2.3.4:11211'])
class ExcConfigParser(object):
def read(self, path):
raise Exception('read called with %r' % path)
class EmptyConfigParser(object):
def read(self, path):
return False
def get_config_parser(memcache_servers='1.2.3.4:5',
memcache_max_connections='4',
section='memcache',
item_size_warning_threshold='75'):
_srvs = memcache_servers
_maxc = memcache_max_connections
_section = section
_warn_threshold = item_size_warning_threshold
class SetConfigParser(object):
def items(self, section_name):
if section_name != section:
raise NoSectionError(section_name)
return {
'memcache_servers': memcache_servers,
'memcache_max_connections': memcache_max_connections
}
def read(self, path):
return True
def get(self, section, option):
if _section == section:
if option == 'memcache_servers':
if _srvs == 'error':
raise NoOptionError(option, section)
return _srvs
elif option in ('memcache_max_connections',
'max_connections'):
if _maxc == 'error':
raise NoOptionError(option, section)
return _maxc
elif option == 'item_size_warning_threshold':
if _warn_threshold == 'error':
raise NoOptionError(option, section)
return _warn_threshold
else:
raise NoOptionError(option, section)
else:
                raise NoSectionError(section)
return SetConfigParser
def start_response(*args):
pass
class TestLoadMemcache(unittest.TestCase):
def setUp(self):
self.logger = debug_logger()
def test_conf_default_read(self):
with mock.patch.object(memcached, 'ConfigParser', ExcConfigParser):
for d in ({},
{'memcache_servers': '6.7.8.9:10'},
{'memcache_max_connections': '30'},
{'item_size_warning_threshold': 75},
{'memcache_servers': '6.7.8.9:10',
'item_size_warning_threshold': '75'},
{'item_size_warning_threshold': '75',
'memcache_max_connections': '30'},
):
with self.assertRaises(Exception) as catcher:
memcached.load_memcache(d, self.logger)
self.assertEqual(
str(catcher.exception),
"read called with '/etc/swift/memcache.conf'")
def test_conf_set_no_read(self):
with mock.patch.object(memcached, 'ConfigParser', ExcConfigParser):
exc = None
try:
memcached.load_memcache({
'memcache_servers': '1.2.3.4:5',
'memcache_max_connections': '30',
'item_size_warning_threshold': '80'
}, self.logger)
except Exception as err:
exc = err
self.assertIsNone(exc)
def test_conf_default(self):
with mock.patch.object(memcached, 'ConfigParser', EmptyConfigParser):
memcache = memcached.load_memcache({}, self.logger)
self.assertEqual(memcache.memcache_servers, ['127.0.0.1:11211'])
self.assertEqual(
memcache._client_cache['127.0.0.1:11211'].max_size, 2)
self.assertEqual(memcache.item_size_warning_threshold, -1)
def test_conf_inline(self):
with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
memcache = memcached.load_memcache({
'memcache_servers': '6.7.8.9:10',
'memcache_max_connections': '5',
'item_size_warning_threshold': '75'
}, self.logger)
self.assertEqual(memcache.memcache_servers, ['6.7.8.9:10'])
self.assertEqual(
memcache._client_cache['6.7.8.9:10'].max_size, 5)
self.assertEqual(memcache.item_size_warning_threshold, 75)
def test_conf_inline_ratelimiting(self):
with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
memcache = memcached.load_memcache({
'error_suppression_limit': '5',
'error_suppression_interval': '2.5',
}, self.logger)
self.assertEqual(memcache._error_limit_count, 5)
self.assertEqual(memcache._error_limit_time, 2.5)
self.assertEqual(memcache._error_limit_duration, 2.5)
def test_conf_inline_tls(self):
fake_context = mock.Mock()
with mock.patch.object(ssl, 'create_default_context',
return_value=fake_context):
with mock.patch.object(memcached, 'ConfigParser',
get_config_parser()):
memcached.load_memcache({
'tls_enabled': 'true',
'tls_cafile': 'cafile',
'tls_certfile': 'certfile',
'tls_keyfile': 'keyfile',
}, self.logger)
ssl.create_default_context.assert_called_with(cafile='cafile')
fake_context.load_cert_chain.assert_called_with('certfile',
'keyfile')
def test_conf_extra_no_section(self):
with mock.patch.object(memcached, 'ConfigParser',
get_config_parser(section='foobar')):
memcache = memcached.load_memcache({}, self.logger)
self.assertEqual(memcache.memcache_servers, ['127.0.0.1:11211'])
self.assertEqual(
memcache._client_cache['127.0.0.1:11211'].max_size, 2)
def test_conf_extra_no_option(self):
replacement_parser = get_config_parser(
memcache_servers='error',
memcache_max_connections='error')
with mock.patch.object(memcached, 'ConfigParser', replacement_parser):
memcache = memcached.load_memcache({}, self.logger)
self.assertEqual(memcache.memcache_servers, ['127.0.0.1:11211'])
self.assertEqual(
memcache._client_cache['127.0.0.1:11211'].max_size, 2)
def test_conf_inline_other_max_conn(self):
with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
memcache = memcached.load_memcache({
'memcache_servers': '6.7.8.9:10',
'max_connections': '5'
}, self.logger)
self.assertEqual(memcache.memcache_servers, ['6.7.8.9:10'])
self.assertEqual(
memcache._client_cache['6.7.8.9:10'].max_size, 5)
def test_conf_inline_bad_max_conn(self):
with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
memcache = memcached.load_memcache({
'memcache_servers': '6.7.8.9:10',
'max_connections': 'bad42',
}, self.logger)
self.assertEqual(memcache.memcache_servers, ['6.7.8.9:10'])
self.assertEqual(
memcache._client_cache['6.7.8.9:10'].max_size, 4)
def test_conf_inline_bad_item_warning_threshold(self):
with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
with self.assertRaises(ValueError) as err:
memcached.load_memcache({
'memcache_servers': '6.7.8.9:10',
'item_size_warning_threshold': 'bad42',
}, self.logger)
self.assertIn('invalid literal for int() with base 10:',
str(err.exception))
def test_conf_from_extra_conf(self):
with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
memcache = memcached.load_memcache({}, self.logger)
self.assertEqual(memcache.memcache_servers, ['1.2.3.4:5'])
self.assertEqual(
memcache._client_cache['1.2.3.4:5'].max_size, 4)
def test_conf_from_extra_conf_bad_max_conn(self):
with mock.patch.object(memcached, 'ConfigParser', get_config_parser(
memcache_max_connections='bad42')):
memcache = memcached.load_memcache({}, self.logger)
self.assertEqual(memcache.memcache_servers, ['1.2.3.4:5'])
self.assertEqual(
memcache._client_cache['1.2.3.4:5'].max_size, 2)
def test_conf_from_inline_and_maxc_from_extra_conf(self):
with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
memcache = memcached.load_memcache({
'memcache_servers': '6.7.8.9:10'}, self.logger)
self.assertEqual(memcache.memcache_servers, ['6.7.8.9:10'])
self.assertEqual(
memcache._client_cache['6.7.8.9:10'].max_size, 4)
def test_conf_from_inline_and_sers_from_extra_conf(self):
with mock.patch.object(memcached, 'ConfigParser', get_config_parser()):
memcache = memcached.load_memcache({
'memcache_servers': '6.7.8.9:10',
'memcache_max_connections': '42',
}, self.logger)
self.assertEqual(memcache.memcache_servers, ['6.7.8.9:10'])
self.assertEqual(
memcache._client_cache['6.7.8.9:10'].max_size, 42)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/test_memcached.py |
# Copyright (c) 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.wsgi"""
import errno
import logging
import socket
import unittest
import os
from collections import defaultdict
from io import BytesIO
from textwrap import dedent
from six.moves.urllib.parse import quote
import mock
import swift.common.middleware.catch_errors
import swift.common.middleware.gatekeeper
import swift.proxy.server
import swift.obj.server as obj_server
import swift.container.server as container_server
import swift.account.server as account_server
from swift.common.swob import Request
from swift.common import wsgi, utils
from swift.common.storage_policy import POLICIES
from test import listen_zero
from test.debug_logger import debug_logger
from test.unit import (
temptree, with_tempdir, write_fake_ring, patch_policies, ConfigAssertMixin)
from paste.deploy import loadwsgi
def _fake_rings(tmpdir):
write_fake_ring(os.path.join(tmpdir, 'account.ring.gz'))
write_fake_ring(os.path.join(tmpdir, 'container.ring.gz'))
for policy in POLICIES:
obj_ring_path = \
os.path.join(tmpdir, policy.ring_name + '.ring.gz')
write_fake_ring(obj_ring_path)
# make sure there's no other ring cached on this policy
policy.object_ring = None
@patch_policies
class TestWSGI(unittest.TestCase, ConfigAssertMixin):
"""Tests for swift.common.wsgi"""
def test_init_request_processor(self):
config = """
[DEFAULT]
swift_dir = TEMPDIR
fallocate_reserve = 1%
[pipeline:main]
pipeline = proxy-server
[app:proxy-server]
use = egg:swift#proxy
conn_timeout = 0.2
"""
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
_fake_rings(t)
app, conf, logger, log_name = wsgi.init_request_processor(
conf_file, 'proxy-server')
# verify pipeline is: catch_errors -> gatekeeper -> listing_formats ->
# copy -> dlo -> proxy-server
expected = swift.common.middleware.catch_errors.CatchErrorMiddleware
self.assertIsInstance(app, expected)
app = app.app
expected = swift.common.middleware.gatekeeper.GatekeeperMiddleware
self.assertIsInstance(app, expected)
app = app.app
expected = swift.common.middleware.listing_formats.ListingFilter
self.assertIsInstance(app, expected)
app = app.app
expected = swift.common.middleware.copy.ServerSideCopyMiddleware
self.assertIsInstance(app, expected)
app = app.app
expected = swift.common.middleware.dlo.DynamicLargeObject
self.assertIsInstance(app, expected)
app = app.app
expected = \
swift.common.middleware.versioned_writes.VersionedWritesMiddleware
self.assertIsInstance(app, expected)
app = app.app
expected = swift.proxy.server.Application
self.assertIsInstance(app, expected)
# config settings applied to app instance
self.assertEqual(0.2, app.conn_timeout)
# appconfig returns values from 'proxy-server' section
expected = {
'__file__': conf_file,
'here': os.path.dirname(conf_file),
'conn_timeout': '0.2',
'fallocate_reserve': '1%',
'swift_dir': t,
'__name__': 'proxy-server'
}
self.assertEqual(expected, conf)
# logger works
logger.info('testing')
self.assertEqual('proxy-server', log_name)
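    # Although the pipeline in the config above names only proxy-server, the
    # loaded app is wrapped in catch_errors, gatekeeper, listing_formats,
    # copy, dlo and versioned_writes -- presumably modify_wsgi_pipeline()
    # inserts the middleware the proxy requires, which is why several tests
    # below mock it out.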
@with_tempdir
def test_loadapp_from_file(self, tempdir):
conf_path = os.path.join(tempdir, 'object-server.conf')
conf_body = """
[DEFAULT]
CONN_timeout = 10
client_timeout = 1
[app:main]
use = egg:swift#object
conn_timeout = 5
client_timeout = 2
CLIENT_TIMEOUT = 3
"""
contents = dedent(conf_body)
with open(conf_path, 'w') as f:
f.write(contents)
app = wsgi.loadapp(conf_path)
        self.assertIsInstance(app, obj_server.ObjectController)
# N.B. paste config loading from *file* is already case-sensitive,
# so, CLIENT_TIMEOUT/client_timeout are unique options
self.assertEqual(1, app.client_timeout)
self.assertEqual(5, app.conn_timeout)
@with_tempdir
def test_loadapp_from_file_with_duplicate_var(self, tempdir):
conf_path = os.path.join(tempdir, 'object-server.conf')
conf_body = """
[app:main]
use = egg:swift#object
client_timeout = 2
client_timeout = 3
"""
contents = dedent(conf_body)
with open(conf_path, 'w') as f:
f.write(contents)
app_config = lambda: wsgi.loadapp(conf_path)
self.assertDuplicateOption(app_config, 'client_timeout', 3.0)
@with_tempdir
def test_loadapp_from_file_with_global_conf(self, tempdir):
# verify that global_conf items override conf file DEFAULTS...
conf_path = os.path.join(tempdir, 'object-server.conf')
conf_body = """
[DEFAULT]
log_name = swift
[app:main]
use = egg:swift#object
log_name = swift-main
"""
contents = dedent(conf_body)
with open(conf_path, 'w') as f:
f.write(contents)
app = wsgi.loadapp(conf_path)
self.assertIsInstance(app, obj_server.ObjectController)
self.assertEqual('swift', app.logger.server)
app = wsgi.loadapp(conf_path, global_conf={'log_name': 'custom'})
self.assertIsInstance(app, obj_server.ObjectController)
self.assertEqual('custom', app.logger.server)
# and regular section options...
conf_path = os.path.join(tempdir, 'object-server.conf')
conf_body = """
[DEFAULT]
[app:main]
use = egg:swift#object
log_name = swift-main
"""
contents = dedent(conf_body)
with open(conf_path, 'w') as f:
f.write(contents)
app = wsgi.loadapp(conf_path)
self.assertIsInstance(app, obj_server.ObjectController)
self.assertEqual('swift-main', app.logger.server)
app = wsgi.loadapp(conf_path, global_conf={'log_name': 'custom'})
self.assertIsInstance(app, obj_server.ObjectController)
self.assertEqual('custom', app.logger.server)
# ...but global_conf items do not override conf file 'set' options
conf_body = """
[DEFAULT]
log_name = swift
[app:main]
use = egg:swift#object
set log_name = swift-main
"""
contents = dedent(conf_body)
with open(conf_path, 'w') as f:
f.write(contents)
app = wsgi.loadapp(conf_path)
self.assertIsInstance(app, obj_server.ObjectController)
self.assertEqual('swift-main', app.logger.server)
app = wsgi.loadapp(conf_path, global_conf={'log_name': 'custom'})
self.assertIsInstance(app, obj_server.ObjectController)
self.assertEqual('swift-main', app.logger.server)
def test_loadapp_from_string(self):
conf_body = """
[DEFAULT]
CONN_timeout = 10
client_timeout = 1
[app:main]
use = egg:swift#object
conn_timeout = 5
client_timeout = 2
"""
app = wsgi.loadapp(wsgi.ConfigString(conf_body))
self.assertTrue(isinstance(app, obj_server.ObjectController))
self.assertEqual(1, app.client_timeout)
self.assertEqual(5, app.conn_timeout)
@with_tempdir
def test_loadapp_from_dir(self, tempdir):
conf_files = {
'pipeline': """
[pipeline:main]
pipeline = tempauth proxy-server
""",
'tempauth': """
[DEFAULT]
swift_dir = %s
random_VAR = foo
[filter:tempauth]
use = egg:swift#tempauth
random_var = bar
""" % tempdir,
'proxy': """
[DEFAULT]
conn_timeout = 5
client_timeout = 1
[app:proxy-server]
use = egg:swift#proxy
CONN_timeout = 10
client_timeout = 2
""",
}
_fake_rings(tempdir)
for filename, conf_body in conf_files.items():
path = os.path.join(tempdir, filename + '.conf')
with open(path, 'wt') as fd:
fd.write(dedent(conf_body))
app = wsgi.loadapp(tempdir)
# DEFAULT takes priority (!?)
self.assertEqual(5, app._pipeline_final_app.conn_timeout)
self.assertEqual(1, app._pipeline_final_app.client_timeout)
self.assertEqual('foo', app.app.app.app.conf['random_VAR'])
self.assertEqual('bar', app.app.app.app.conf['random_var'])
@with_tempdir
def test_loadapp_from_dir_with_duplicate_var(self, tempdir):
conf_files = {
'pipeline': """
[pipeline:main]
pipeline = tempauth proxy-server
""",
'tempauth': """
[DEFAULT]
swift_dir = %s
random_VAR = foo
[filter:tempauth]
use = egg:swift#tempauth
random_var = bar
""" % tempdir,
'proxy': """
[app:proxy-server]
use = egg:swift#proxy
client_timeout = 2
CLIENT_TIMEOUT = 1
conn_timeout = 3
conn_timeout = 4
""",
}
_fake_rings(tempdir)
for filename, conf_body in conf_files.items():
path = os.path.join(tempdir, filename + '.conf')
with open(path, 'wt') as fd:
fd.write(dedent(conf_body))
app_config = lambda: wsgi.loadapp(tempdir)
# N.B. our paste conf.d parsing re-uses readconf,
# so, CLIENT_TIMEOUT/client_timeout are unique options
self.assertDuplicateOption(app_config, 'conn_timeout', 4.0)
@with_tempdir
def test_load_app_config(self, tempdir):
conf_file = os.path.join(tempdir, 'file.conf')
def _write_and_load_conf_file(conf):
with open(conf_file, 'wt') as fd:
fd.write(dedent(conf))
return wsgi.load_app_config(conf_file)
# typical case - DEFAULT options override same option in other sections
conf_str = """
[DEFAULT]
dflt_option = dflt-value
[pipeline:main]
pipeline = proxy-logging proxy-server
[filter:proxy-logging]
use = egg:swift#proxy_logging
[app:proxy-server]
use = egg:swift#proxy
proxy_option = proxy-value
dflt_option = proxy-dflt-value
"""
proxy_conf = _write_and_load_conf_file(conf_str)
self.assertEqual('proxy-value', proxy_conf['proxy_option'])
self.assertEqual('dflt-value', proxy_conf['dflt_option'])
# 'set' overrides DEFAULT option
conf_str = """
[DEFAULT]
dflt_option = dflt-value
[pipeline:main]
pipeline = proxy-logging proxy-server
[filter:proxy-logging]
use = egg:swift#proxy_logging
[app:proxy-server]
use = egg:swift#proxy
proxy_option = proxy-value
set dflt_option = proxy-dflt-value
"""
proxy_conf = _write_and_load_conf_file(conf_str)
self.assertEqual('proxy-value', proxy_conf['proxy_option'])
self.assertEqual('proxy-dflt-value', proxy_conf['dflt_option'])
# actual proxy server app name is dereferenced
conf_str = """
[pipeline:main]
pipeline = proxy-logging proxyserverapp
[filter:proxy-logging]
use = egg:swift#proxy_logging
[app:proxyserverapp]
use = egg:swift#proxy
proxy_option = proxy-value
dflt_option = proxy-dflt-value
"""
proxy_conf = _write_and_load_conf_file(conf_str)
self.assertEqual('proxy-value', proxy_conf['proxy_option'])
self.assertEqual('proxy-dflt-value', proxy_conf['dflt_option'])
# no pipeline
conf_str = """
[filter:proxy-logging]
use = egg:swift#proxy_logging
[app:proxy-server]
use = egg:swift#proxy
proxy_option = proxy-value
"""
proxy_conf = _write_and_load_conf_file(conf_str)
self.assertEqual({}, proxy_conf)
# no matching section
conf_str = """
[pipeline:main]
pipeline = proxy-logging proxy-server
[filter:proxy-logging]
use = egg:swift#proxy_logging
"""
proxy_conf = _write_and_load_conf_file(conf_str)
self.assertEqual({}, proxy_conf)
def test_init_request_processor_from_conf_dir(self):
config_dir = {
'proxy-server.conf.d/pipeline.conf': """
[pipeline:main]
pipeline = catch_errors proxy-server
""",
'proxy-server.conf.d/app.conf': """
[app:proxy-server]
use = egg:swift#proxy
conn_timeout = 0.2
""",
'proxy-server.conf.d/catch-errors.conf': """
[filter:catch_errors]
use = egg:swift#catch_errors
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with mock.patch('swift.proxy.server.Application.modify_wsgi_pipeline'):
with temptree(*zip(*config_dir.items())) as conf_root:
conf_dir = os.path.join(conf_root, 'proxy-server.conf.d')
with open(os.path.join(conf_dir, 'swift.conf'), 'w') as f:
f.write('[DEFAULT]\nswift_dir = %s' % conf_root)
_fake_rings(conf_root)
app, conf, logger, log_name = wsgi.init_request_processor(
conf_dir, 'proxy-server')
# verify pipeline is catch_errors -> proxy-server
expected = swift.common.middleware.catch_errors.CatchErrorMiddleware
self.assertTrue(isinstance(app, expected))
self.assertTrue(isinstance(app.app, swift.proxy.server.Application))
# config settings applied to app instance
self.assertEqual(0.2, app.app.conn_timeout)
# appconfig returns values from 'proxy-server' section
expected = {
'__file__': conf_dir,
'here': conf_dir,
'conn_timeout': '0.2',
'swift_dir': conf_root,
'__name__': 'proxy-server'
}
self.assertEqual(expected, conf)
# logger works
logger.info('testing')
self.assertEqual('proxy-server', log_name)
def test_get_socket_bad_values(self):
# first try with no port set
self.assertRaises(wsgi.ConfigFilePortError, wsgi.get_socket, {})
# next try with a bad port value set
self.assertRaises(wsgi.ConfigFilePortError, wsgi.get_socket,
{'bind_port': 'abc'})
self.assertRaises(wsgi.ConfigFilePortError, wsgi.get_socket,
{'bind_port': None})
def test_get_socket(self):
# stubs
conf = {'bind_port': 54321}
ssl_conf = conf.copy()
ssl_conf.update({
'cert_file': '',
'key_file': '',
})
# mocks
class MockSocket(object):
def __init__(self):
self.opts = defaultdict(dict)
def setsockopt(self, level, optname, value):
self.opts[level][optname] = value
def mock_listen(*args, **kwargs):
return MockSocket()
class MockSsl(object):
def __init__(self):
self.wrap_socket_called = []
def wrap_socket(self, sock, **kwargs):
self.wrap_socket_called.append(kwargs)
return sock
# patch
old_listen = wsgi.listen
old_ssl = wsgi.ssl
try:
wsgi.listen = mock_listen
wsgi.ssl = MockSsl()
# test
sock = wsgi.get_socket(conf)
# assert
self.assertTrue(isinstance(sock, MockSocket))
expected_socket_opts = {
socket.SOL_SOCKET: {
socket.SO_KEEPALIVE: 1,
},
socket.IPPROTO_TCP: {
socket.TCP_NODELAY: 1,
}
}
if hasattr(socket, 'TCP_KEEPIDLE'):
expected_socket_opts[socket.IPPROTO_TCP][
socket.TCP_KEEPIDLE] = 600
self.assertEqual(sock.opts, expected_socket_opts)
# test ssl
sock = wsgi.get_socket(ssl_conf)
expected_kwargs = {
'certfile': '',
'keyfile': '',
}
self.assertEqual(wsgi.ssl.wrap_socket_called, [expected_kwargs])
# test keep_idle value
keepIdle_value = 700
conf['keep_idle'] = keepIdle_value
sock = wsgi.get_socket(conf)
# assert
if hasattr(socket, 'TCP_KEEPIDLE'):
expected_socket_opts[socket.IPPROTO_TCP][
socket.TCP_KEEPIDLE] = keepIdle_value
self.assertEqual(sock.opts, expected_socket_opts)
# test keep_idle for str -> int conversion
keepIdle_value = '800'
conf['keep_idle'] = keepIdle_value
sock = wsgi.get_socket(conf)
# assert
if hasattr(socket, 'TCP_KEEPIDLE'):
expected_socket_opts[socket.IPPROTO_TCP][
socket.TCP_KEEPIDLE] = int(keepIdle_value)
self.assertEqual(sock.opts, expected_socket_opts)
# test keep_idle for negative value
conf['keep_idle'] = -600
self.assertRaises(wsgi.ConfigFileError, wsgi.get_socket, conf)
# test keep_idle for upperbound value
conf['keep_idle'] = 2 ** 15
self.assertRaises(wsgi.ConfigFileError, wsgi.get_socket, conf)
# test keep_idle for Type mismatch
conf['keep_idle'] = 'foobar'
self.assertRaises(wsgi.ConfigFileError, wsgi.get_socket, conf)
finally:
wsgi.listen = old_listen
wsgi.ssl = old_ssl
def test_address_in_use(self):
# stubs
conf = {'bind_port': 54321}
# mocks
def mock_listen(*args, **kwargs):
raise socket.error(errno.EADDRINUSE)
def value_error_listen(*args, **kwargs):
raise ValueError('fake')
def mock_sleep(*args):
pass
class MockTime(object):
"""Fast clock advances 10 seconds after every call to time
"""
def __init__(self):
self.current_time = old_time.time()
def time(self, *args, **kwargs):
rv = self.current_time
# advance for next call
self.current_time += 10
return rv
old_listen = wsgi.listen
old_sleep = wsgi.sleep
old_time = wsgi.time
try:
wsgi.listen = mock_listen
wsgi.sleep = mock_sleep
wsgi.time = MockTime()
# test error
self.assertRaises(Exception, wsgi.get_socket, conf)
# different error
wsgi.listen = value_error_listen
self.assertRaises(ValueError, wsgi.get_socket, conf)
finally:
wsgi.listen = old_listen
wsgi.sleep = old_sleep
wsgi.time = old_time
def test_run_server(self):
config = """
[DEFAULT]
client_timeout = 30
keepalive_timeout = 10
max_clients = 1000
swift_dir = TEMPDIR
[pipeline:main]
pipeline = proxy-server
[app:proxy-server]
use = egg:swift#proxy
# while "set" values normally override default
set client_timeout = 20
# this section is not in conf during run_server
set max_clients = 10
"""
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
_fake_rings(t)
with mock.patch('swift.common.wsgi.wsgi') as _wsgi, \
mock.patch('swift.common.wsgi.eventlet') as _wsgi_evt:
conf = wsgi.appconfig(conf_file)
logger = logging.getLogger('test')
sock = listen_zero()
wsgi.run_server(conf, logger, sock,
allow_modify_pipeline=False)
_wsgi_evt.hubs.use_hub.assert_called_with(utils.get_hub())
_wsgi_evt.debug.hub_exceptions.assert_called_with(False)
self.assertTrue(_wsgi.server.called)
args, kwargs = _wsgi.server.call_args
server_sock, server_app, server_logger = args
self.assertEqual(sock, server_sock)
self.assertTrue(isinstance(server_app, swift.proxy.server.Application))
self.assertEqual(20, server_app.client_timeout)
self.assertTrue(isinstance(server_logger, wsgi.NullLogger))
self.assertTrue('custom_pool' in kwargs)
self.assertEqual(1000, kwargs['custom_pool'].size)
self.assertEqual(30, kwargs['socket_timeout'])
self.assertEqual(10, kwargs['keepalive'])
proto_class = kwargs['protocol']
self.assertEqual(proto_class, wsgi.SwiftHttpProtocol)
self.assertEqual('HTTP/1.0', proto_class.default_request_version)
def test_run_server_proxied(self):
config = """
[DEFAULT]
client_timeout = 30
max_clients = 1000
swift_dir = TEMPDIR
[pipeline:main]
pipeline = proxy-server
[app:proxy-server]
use = egg:swift#proxy
# these "set" values override defaults
set client_timeout = 2.5
set max_clients = 10
require_proxy_protocol = true
"""
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
_fake_rings(t)
with mock.patch('swift.proxy.server.Application.'
'modify_wsgi_pipeline'), \
mock.patch('swift.common.wsgi.wsgi') as _wsgi, \
mock.patch('swift.common.wsgi.eventlet') as _eventlet:
conf = wsgi.appconfig(conf_file,
name='proxy-server')
logger = logging.getLogger('test')
sock = listen_zero()
wsgi.run_server(conf, logger, sock)
_eventlet.hubs.use_hub.assert_called_with(utils.get_hub())
_eventlet.debug.hub_exceptions.assert_called_with(False)
self.assertTrue(_wsgi.server.called)
args, kwargs = _wsgi.server.call_args
server_sock, server_app, server_logger = args
self.assertEqual(sock, server_sock)
self.assertTrue(isinstance(server_app, swift.proxy.server.Application))
self.assertEqual(2.5, server_app.client_timeout)
self.assertTrue(isinstance(server_logger, wsgi.NullLogger))
self.assertTrue('custom_pool' in kwargs)
self.assertEqual(10, kwargs['custom_pool'].size)
self.assertEqual(2.5, kwargs['socket_timeout'])
self.assertNotIn('keepalive', kwargs) # eventlet defaults to True
proto_class = kwargs['protocol']
self.assertEqual(proto_class, wsgi.SwiftHttpProxiedProtocol)
self.assertEqual('HTTP/1.0', proto_class.default_request_version)
def test_run_server_with_latest_eventlet(self):
config = """
[DEFAULT]
swift_dir = TEMPDIR
keepalive_timeout = 0
[pipeline:main]
pipeline = proxy-server
[app:proxy-server]
use = egg:swift#proxy
"""
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
_fake_rings(t)
with mock.patch('swift.proxy.server.Application.'
'modify_wsgi_pipeline'), \
mock.patch('swift.common.wsgi.wsgi') as _wsgi, \
mock.patch('swift.common.wsgi.eventlet'):
conf = wsgi.appconfig(conf_file)
logger = logging.getLogger('test')
sock = listen_zero()
wsgi.run_server(conf, logger, sock)
self.assertTrue(_wsgi.server.called)
args, kwargs = _wsgi.server.call_args
self.assertEqual(kwargs.get('capitalize_response_headers'), False)
self.assertTrue('protocol' in kwargs)
self.assertEqual('HTTP/1.0',
kwargs['protocol'].default_request_version)
self.assertIs(False, kwargs['keepalive'])
def test_run_server_conf_dir(self):
config_dir = {
'proxy-server.conf.d/pipeline.conf': """
[pipeline:main]
pipeline = proxy-server
""",
'proxy-server.conf.d/app.conf': """
[app:proxy-server]
use = egg:swift#proxy
""",
'proxy-server.conf.d/default.conf': """
[DEFAULT]
client_timeout = 30
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as conf_root:
conf_dir = os.path.join(conf_root, 'proxy-server.conf.d')
with open(os.path.join(conf_dir, 'swift.conf'), 'w') as f:
f.write('[DEFAULT]\nswift_dir = %s' % conf_root)
_fake_rings(conf_root)
with mock.patch('swift.proxy.server.Application.'
'modify_wsgi_pipeline'), \
mock.patch('swift.common.wsgi.wsgi') as _wsgi, \
mock.patch('swift.common.wsgi.eventlet') as _wsgi_evt, \
mock.patch.dict('os.environ', {'TZ': ''}), \
mock.patch('time.tzset'):
conf = wsgi.appconfig(conf_dir)
logger = logging.getLogger('test')
sock = listen_zero()
wsgi.run_server(conf, logger, sock)
self.assertNotEqual(os.environ['TZ'], '')
_wsgi_evt.hubs.use_hub.assert_called_with(utils.get_hub())
_wsgi_evt.debug.hub_exceptions.assert_called_with(False)
self.assertTrue(_wsgi.server.called)
args, kwargs = _wsgi.server.call_args
server_sock, server_app, server_logger = args
self.assertEqual(sock, server_sock)
self.assertTrue(isinstance(server_app, swift.proxy.server.Application))
self.assertTrue(isinstance(server_logger, wsgi.NullLogger))
self.assertTrue('custom_pool' in kwargs)
self.assertEqual(30, kwargs['socket_timeout'])
self.assertTrue('protocol' in kwargs)
self.assertEqual('HTTP/1.0',
kwargs['protocol'].default_request_version)
def test_run_server_debug(self):
config = """
[DEFAULT]
eventlet_debug = yes
client_timeout = 30
max_clients = 1000
swift_dir = TEMPDIR
[pipeline:main]
pipeline = proxy-server
[app:proxy-server]
use = egg:swift#proxy
# while "set" values normally override default
set client_timeout = 20
# this section is not in conf during run_server
set max_clients = 10
"""
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
_fake_rings(t)
with mock.patch('swift.proxy.server.Application.'
'modify_wsgi_pipeline'), \
mock.patch('swift.common.wsgi.wsgi') as _wsgi, \
mock.patch('swift.common.wsgi.eventlet') as _wsgi_evt:
mock_server = _wsgi.server
_wsgi.server = lambda *args, **kwargs: mock_server(
*args, **kwargs)
conf = wsgi.appconfig(conf_file)
logger = logging.getLogger('test')
sock = listen_zero()
wsgi.run_server(conf, logger, sock)
_wsgi_evt.hubs.use_hub.assert_called_with(utils.get_hub())
_wsgi_evt.debug.hub_exceptions.assert_called_with(True)
self.assertTrue(mock_server.called)
args, kwargs = mock_server.call_args
server_sock, server_app, server_logger = args
self.assertEqual(sock, server_sock)
self.assertTrue(isinstance(server_app, swift.proxy.server.Application))
self.assertEqual(20, server_app.client_timeout)
self.assertIsNone(server_logger)
self.assertTrue('custom_pool' in kwargs)
self.assertEqual(1000, kwargs['custom_pool'].size)
self.assertEqual(30, kwargs['socket_timeout'])
self.assertTrue('protocol' in kwargs)
self.assertEqual('HTTP/1.0',
kwargs['protocol'].default_request_version)
def test_appconfig_dir_ignores_hidden_files(self):
config_dir = {
'server.conf.d/01.conf': """
[app:main]
use = egg:swift#proxy
port = 8080
""",
'server.conf.d/.01.conf.swp': """
[app:main]
use = egg:swift#proxy
port = 8081
""",
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = wsgi.appconfig(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'here': os.path.join(path, 'server.conf.d'),
'port': '8080', '__name__': 'main'
}
self.assertEqual(conf, expected)
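# make_pre_authed_env tests: a pre-authed copy of the environ gets a fresh,
# empty wsgi.input, and swift.source is preserved unless explicitly replaced
# via swift_source.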
def test_pre_auth_wsgi_input(self):
oldenv = {}
newenv = wsgi.make_pre_authed_env(oldenv)
self.assertTrue('wsgi.input' in newenv)
self.assertEqual(newenv['wsgi.input'].read(), b'')
oldenv = {'wsgi.input': BytesIO(b'original wsgi.input')}
newenv = wsgi.make_pre_authed_env(oldenv)
self.assertTrue('wsgi.input' in newenv)
self.assertEqual(newenv['wsgi.input'].read(), b'')
oldenv = {'swift.source': 'UT'}
newenv = wsgi.make_pre_authed_env(oldenv)
self.assertEqual(newenv['swift.source'], 'UT')
oldenv = {'swift.source': 'UT'}
newenv = wsgi.make_pre_authed_env(oldenv, swift_source='SA')
self.assertEqual(newenv['swift.source'], 'SA')
def test_pre_auth_req(self):
class FakeReq(object):
@classmethod
def fake_blank(cls, path, environ=None, body=b'', headers=None):
if environ is None:
environ = {}
if headers is None:
headers = {}
self.assertIsNone(environ['swift.authorize']('test'))
self.assertFalse('HTTP_X_TRANS_ID' in environ)
was_blank = Request.blank
Request.blank = FakeReq.fake_blank
wsgi.make_pre_authed_request({'HTTP_X_TRANS_ID': '1234'},
'PUT', '/', body=b'tester', headers={})
wsgi.make_pre_authed_request({'HTTP_X_TRANS_ID': '1234'},
'PUT', '/', headers={})
Request.blank = was_blank
def test_pre_auth_req_with_quoted_path(self):
r = wsgi.make_pre_authed_request(
{'HTTP_X_TRANS_ID': '1234'}, 'PUT', path=quote('/a space'),
body=b'tester', headers={})
self.assertEqual(r.path, quote('/a space'))
def test_pre_auth_req_drops_query(self):
r = wsgi.make_pre_authed_request(
{'QUERY_STRING': 'original'}, 'GET', 'path')
self.assertEqual(r.query_string, 'original')
r = wsgi.make_pre_authed_request(
{'QUERY_STRING': 'original'}, 'GET', 'path?replacement')
self.assertEqual(r.query_string, 'replacement')
r = wsgi.make_pre_authed_request(
{'QUERY_STRING': 'original'}, 'GET', 'path?')
self.assertEqual(r.query_string, '')
def test_pre_auth_req_with_body(self):
r = wsgi.make_pre_authed_request(
{'QUERY_STRING': 'original'}, 'GET', 'path', b'the body')
self.assertEqual(r.body, b'the body')
def test_pre_auth_creates_script_name(self):
e = wsgi.make_pre_authed_env({})
self.assertTrue('SCRIPT_NAME' in e)
def test_pre_auth_copies_script_name(self):
e = wsgi.make_pre_authed_env({'SCRIPT_NAME': '/script_name'})
self.assertEqual(e['SCRIPT_NAME'], '/script_name')
def test_pre_auth_copies_script_name_unless_path_overridden(self):
e = wsgi.make_pre_authed_env({'SCRIPT_NAME': '/script_name'},
path='/override')
self.assertEqual(e['SCRIPT_NAME'], '')
self.assertEqual(e['PATH_INFO'], '/override')
def test_pre_auth_req_swift_source(self):
r = wsgi.make_pre_authed_request(
{'QUERY_STRING': 'original'}, 'GET', 'path', b'the body',
swift_source='UT')
self.assertEqual(r.body, b'the body')
self.assertEqual(r.environ['swift.source'], 'UT')
def test_run_server_global_conf_callback(self):
calls = defaultdict(lambda: 0)
def _initrp(conf_file, app_section, *args, **kwargs):
return (
{'__file__': 'test', 'workers': 0, 'bind_port': 12345},
'logger',
'log_name')
loadapp_conf = []
to_inject = object() # replication_timeout injects non-string data
def _global_conf_callback(preloaded_app_conf, global_conf):
calls['_global_conf_callback'] += 1
self.assertEqual(
preloaded_app_conf,
{'__file__': 'test', 'workers': 0, 'bind_port': 12345})
self.assertEqual(global_conf, {'log_name': 'log_name'})
global_conf['test1'] = to_inject
def _loadapp(uri, name=None, **kwargs):
calls['_loadapp'] += 1
self.assertIn('global_conf', kwargs)
loadapp_conf.append(kwargs['global_conf'])
# global_conf_callback hasn't been called yet
self.assertNotIn('test1', kwargs['global_conf'])
def _run_server(*args, **kwargs):
# but by the time that we actually *run* the server, it has
self.assertEqual(loadapp_conf,
[{'log_name': 'log_name', 'test1': to_inject}])
with mock.patch.object(wsgi, '_initrp', _initrp), \
mock.patch.object(wsgi, 'get_socket'), \
mock.patch.object(wsgi, 'drop_privileges'), \
mock.patch.object(wsgi, 'loadapp', _loadapp), \
mock.patch.object(wsgi, 'capture_stdio'), \
mock.patch.object(wsgi, 'run_server', _run_server), \
mock.patch('swift.common.utils.eventlet') as _utils_evt:
wsgi.run_wsgi('conf_file', 'app_section',
global_conf_callback=_global_conf_callback)
self.assertEqual(calls['_global_conf_callback'], 1)
self.assertEqual(calls['_loadapp'], 1)
_utils_evt.patcher.monkey_patch.assert_called_with(all=False,
socket=True,
select=True,
thread=True)
def test_run_server_success(self):
calls = defaultdict(int)
def _initrp(conf_file, app_section, *args, **kwargs):
calls['_initrp'] += 1
return (
{'__file__': 'test', 'workers': 0, 'bind_port': 12345},
'logger',
'log_name')
def _loadapp(uri, name=None, **kwargs):
calls['_loadapp'] += 1
logging.logThreads = 1 # reset to default
with mock.patch.object(wsgi, '_initrp', _initrp), \
mock.patch.object(wsgi, 'get_socket'), \
mock.patch.object(wsgi, 'drop_privileges') as _d_privs, \
mock.patch.object(wsgi, 'clean_up_daemon_hygiene') as _c_hyg, \
mock.patch.object(wsgi, 'loadapp', _loadapp), \
mock.patch.object(wsgi, 'capture_stdio'), \
mock.patch.object(wsgi, 'run_server'), \
mock.patch('swift.common.utils.eventlet') as _utils_evt:
rc = wsgi.run_wsgi('conf_file', 'app_section')
self.assertEqual(calls['_initrp'], 1)
self.assertEqual(calls['_loadapp'], 1)
self.assertEqual(rc, 0)
_utils_evt.patcher.monkey_patch.assert_called_with(all=False,
socket=True,
select=True,
thread=True)
# run_wsgi() no longer calls drop_privileges() in the parent process,
# just clean_up_daemon_hygiene()
self.assertEqual([], _d_privs.mock_calls)
self.assertEqual([mock.call()], _c_hyg.mock_calls)
self.assertEqual(0, logging.logThreads) # fixed in our monkey_patch
def test_run_server_test_config(self):
calls = defaultdict(int)
def _initrp(conf_file, app_section, *args, **kwargs):
calls['_initrp'] += 1
return (
{'__file__': 'test', 'workers': 0, 'bind_port': 12345},
'logger',
'log_name')
def _loadapp(uri, name=None, **kwargs):
calls['_loadapp'] += 1
with mock.patch.object(wsgi, '_initrp', _initrp), \
mock.patch.object(wsgi, 'get_socket') as _get_socket, \
mock.patch.object(wsgi, 'drop_privileges') as _d_privs, \
mock.patch.object(wsgi, 'clean_up_daemon_hygiene') as _c_hyg, \
mock.patch.object(wsgi, 'loadapp', _loadapp), \
mock.patch.object(wsgi, 'capture_stdio'), \
mock.patch.object(wsgi, 'run_server'), \
mock.patch('swift.common.utils.eventlet') as _utils_evt:
rc = wsgi.run_wsgi('conf_file', 'app_section', test_config=True)
self.assertEqual(calls['_initrp'], 1)
self.assertEqual(calls['_loadapp'], 1)
self.assertEqual(rc, 0)
_utils_evt.patcher.monkey_patch.assert_called_with(all=False,
socket=True,
select=True,
thread=True)
# run_wsgi() stops before calling clean_up_daemon_hygiene() or
# creating sockets
self.assertEqual([], _d_privs.mock_calls)
self.assertEqual([], _c_hyg.mock_calls)
self.assertEqual([], _get_socket.mock_calls)
@mock.patch('swift.common.wsgi.run_server')
@mock.patch('swift.common.wsgi.WorkersStrategy')
@mock.patch('swift.common.wsgi.ServersPerPortStrategy')
def test_run_server_strategy_plumbing(self, mock_per_port, mock_workers,
mock_run_server):
# Make sure the right strategy gets used in a number of different
# config cases.
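# Without servers_per_port every server type uses WorkersStrategy; with
# servers_per_port set, only the object-server switches to
# ServersPerPortStrategy.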
class StopAtCreatingSockets(Exception):
'''Dummy exception to make sure we don't actually bind ports'''
mock_per_port().no_fork_sock.return_value = None
mock_per_port().new_worker_socks.side_effect = StopAtCreatingSockets
mock_workers().no_fork_sock.return_value = None
mock_workers().new_worker_socks.side_effect = StopAtCreatingSockets
logger = debug_logger()
stub__initrp = [
{'__file__': 'test', 'workers': 2, 'bind_port': 12345}, # conf
logger,
'log_name',
]
with mock.patch.object(wsgi, '_initrp', return_value=stub__initrp), \
mock.patch.object(wsgi, 'loadapp'), \
mock.patch.object(wsgi, 'capture_stdio'):
for server_type in ('account-server', 'container-server',
'object-server'):
mock_per_port.reset_mock()
mock_workers.reset_mock()
logger._clear()
with self.assertRaises(StopAtCreatingSockets):
wsgi.run_wsgi('conf_file', server_type)
self.assertEqual([], mock_per_port.mock_calls)
self.assertEqual([
mock.call(stub__initrp[0], logger),
mock.call().no_fork_sock(),
mock.call().new_worker_socks(),
], mock_workers.mock_calls)
stub__initrp[0]['servers_per_port'] = 3
for server_type in ('account-server', 'container-server'):
mock_per_port.reset_mock()
mock_workers.reset_mock()
logger._clear()
with self.assertRaises(StopAtCreatingSockets):
wsgi.run_wsgi('conf_file', server_type)
self.assertEqual([], mock_per_port.mock_calls)
self.assertEqual([
mock.call(stub__initrp[0], logger),
mock.call().no_fork_sock(),
mock.call().new_worker_socks(),
], mock_workers.mock_calls)
mock_per_port.reset_mock()
mock_workers.reset_mock()
logger._clear()
with self.assertRaises(StopAtCreatingSockets):
wsgi.run_wsgi('conf_file', 'object-server')
self.assertEqual([
mock.call(stub__initrp[0], logger, servers_per_port=3),
mock.call().no_fork_sock(),
mock.call().new_worker_socks(),
], mock_per_port.mock_calls)
self.assertEqual([], mock_workers.mock_calls)
def test_run_server_failure1(self):
calls = defaultdict(lambda: 0)
def _initrp(conf_file, app_section, *args, **kwargs):
calls['_initrp'] += 1
raise wsgi.ConfigFileError('test exception')
def _loadapp(uri, name=None, **kwargs):
calls['_loadapp'] += 1
with mock.patch.object(wsgi, '_initrp', _initrp), \
mock.patch.object(wsgi, 'get_socket'), \
mock.patch.object(wsgi, 'drop_privileges'), \
mock.patch.object(wsgi, 'loadapp', _loadapp), \
mock.patch.object(wsgi, 'capture_stdio'), \
mock.patch.object(wsgi, 'run_server'):
rc = wsgi.run_wsgi('conf_file', 'app_section')
self.assertEqual(calls['_initrp'], 1)
self.assertEqual(calls['_loadapp'], 0)
self.assertEqual(rc, 1)
def test_run_server_bad_bind_port(self):
def do_test(port):
calls = defaultdict(lambda: 0)
logger = debug_logger()
def _initrp(conf_file, app_section, *args, **kwargs):
calls['_initrp'] += 1
return (
{'__file__': 'test', 'workers': 0, 'bind_port': port},
logger,
'log_name')
def _loadapp(uri, name=None, **kwargs):
calls['_loadapp'] += 1
with mock.patch.object(wsgi, '_initrp', _initrp), \
mock.patch.object(wsgi, 'get_socket'), \
mock.patch.object(wsgi, 'drop_privileges'), \
mock.patch.object(wsgi, 'loadapp', _loadapp), \
mock.patch.object(wsgi, 'capture_stdio'), \
mock.patch.object(wsgi, 'run_server'):
rc = wsgi.run_wsgi('conf_file', 'app_section')
self.assertEqual(calls['_initrp'], 1)
self.assertEqual(calls['_loadapp'], 0)
self.assertEqual(rc, 1)
self.assertEqual(
["bind_port wasn't properly set in the config file. "
"It must be explicitly set to a valid port number."],
logger.get_lines_for_level('error')
)
do_test('bad')
do_test('80000')
def test_pre_auth_req_with_empty_env_no_path(self):
r = wsgi.make_pre_authed_request(
{}, 'GET')
self.assertEqual(r.path, quote(''))
self.assertTrue('SCRIPT_NAME' in r.environ)
self.assertTrue('PATH_INFO' in r.environ)
def test_pre_auth_req_with_env_path(self):
r = wsgi.make_pre_authed_request(
{'PATH_INFO': '/unquoted path with %20'}, 'GET')
self.assertEqual(r.path, quote('/unquoted path with %20'))
self.assertEqual(r.environ['SCRIPT_NAME'], '')
def test_pre_auth_req_with_env_script(self):
r = wsgi.make_pre_authed_request({'SCRIPT_NAME': '/hello'}, 'GET')
self.assertEqual(r.path, quote('/hello'))
def test_pre_auth_req_with_env_path_and_script(self):
env = {'PATH_INFO': '/unquoted path with %20',
'SCRIPT_NAME': '/script'}
r = wsgi.make_pre_authed_request(env, 'GET')
expected_path = quote(env['SCRIPT_NAME'] + env['PATH_INFO'])
self.assertEqual(r.path, expected_path)
env = {'PATH_INFO': '', 'SCRIPT_NAME': '/script'}
r = wsgi.make_pre_authed_request(env, 'GET')
self.assertEqual(r.path, '/script')
env = {'PATH_INFO': '/path', 'SCRIPT_NAME': ''}
r = wsgi.make_pre_authed_request(env, 'GET')
self.assertEqual(r.path, '/path')
env = {'PATH_INFO': '', 'SCRIPT_NAME': ''}
r = wsgi.make_pre_authed_request(env, 'GET')
self.assertEqual(r.path, '')
def test_pre_auth_req_path_overrides_env(self):
env = {'PATH_INFO': '/path', 'SCRIPT_NAME': '/script'}
r = wsgi.make_pre_authed_request(env, 'GET', '/override')
self.assertEqual(r.path, '/override')
self.assertEqual(r.environ['SCRIPT_NAME'], '')
self.assertEqual(r.environ['PATH_INFO'], '/override')
def test_make_env_keep_user_project_id(self):
oldenv = {'HTTP_X_USER_ID': '1234', 'HTTP_X_PROJECT_ID': '5678'}
newenv = wsgi.make_env(oldenv)
self.assertTrue('HTTP_X_USER_ID' in newenv)
self.assertEqual(newenv['HTTP_X_USER_ID'], '1234')
self.assertTrue('HTTP_X_PROJECT_ID' in newenv)
self.assertEqual(newenv['HTTP_X_PROJECT_ID'], '5678')
def test_make_env_keeps_referer(self):
oldenv = {'HTTP_REFERER': 'http://blah.example.com'}
newenv = wsgi.make_env(oldenv)
self.assertTrue('HTTP_REFERER' in newenv)
self.assertEqual(newenv['HTTP_REFERER'], 'http://blah.example.com')
def test_make_env_keeps_infocache(self):
oldenv = {'swift.infocache': {}}
newenv = wsgi.make_env(oldenv)
self.assertIs(newenv.get('swift.infocache'), oldenv['swift.infocache'])
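# Shared by both strategy test classes below: after forking, post_fork_hook()
# must drop privileges to the configured user and capture stdio for the
# worker's logger.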
class CommonTestMixin(object):
@mock.patch('swift.common.wsgi.capture_stdio')
def test_post_fork_hook(self, mock_capture):
self.strategy.post_fork_hook()
self.assertEqual([
mock.call('bob'),
], self.mock_drop_privileges.mock_calls)
self.assertEqual([
mock.call(self.logger),
], mock_capture.mock_calls)
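# ServersPerPortStrategy: one listen socket per (port, server index) pair.
# setUp fakes a BindPortsCache reporting ports 6006 and 6007 with
# servers_per_port=3, so a full start-up consumes all six mock sockets.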
class TestServersPerPortStrategy(unittest.TestCase, CommonTestMixin):
def setUp(self):
self.logger = debug_logger()
self.conf = {
'workers': 100, # ignored
'user': 'bob',
'swift_dir': '/jim/cricket',
'ring_check_interval': '76',
'bind_ip': '2.3.4.5',
}
self.servers_per_port = 3
self.sockets = [mock.MagicMock() for _ in range(6)]
patcher = mock.patch('swift.common.wsgi.get_socket',
side_effect=self.sockets)
self.mock_get_socket = patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch('swift.common.wsgi.drop_privileges')
self.mock_drop_privileges = patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch('swift.common.wsgi.BindPortsCache')
self.mock_cache_class = patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch('swift.common.wsgi.os.setsid')
self.mock_setsid = patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch('swift.common.wsgi.os.chdir')
self.mock_chdir = patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch('swift.common.wsgi.os.umask')
self.mock_umask = patcher.start()
self.addCleanup(patcher.stop)
self.all_bind_ports_for_node = \
self.mock_cache_class().all_bind_ports_for_node
self.ports = (6006, 6007)
self.all_bind_ports_for_node.return_value = set(self.ports)
self.strategy = wsgi.ServersPerPortStrategy(self.conf, self.logger,
self.servers_per_port)
def test_loop_timeout(self):
# This strategy should loop every ring_check_interval seconds, even if
# no workers exit.
self.assertEqual(76, self.strategy.loop_timeout())
# Check the default
del self.conf['ring_check_interval']
self.strategy = wsgi.ServersPerPortStrategy(self.conf, self.logger,
self.servers_per_port)
self.assertEqual(15, self.strategy.loop_timeout())
def test_no_fork_sock(self):
self.assertIsNone(self.strategy.no_fork_sock())
def test_new_worker_socks(self):
self.all_bind_ports_for_node.reset_mock()
pid = 88
got_si = []
for s, i in self.strategy.new_worker_socks():
got_si.append((s, i))
self.strategy.register_worker_start(s, i, pid)
pid += 1
self.assertEqual([
(self.sockets[0], (6006, 0)),
(self.sockets[1], (6006, 1)),
(self.sockets[2], (6006, 2)),
(self.sockets[3], (6007, 0)),
(self.sockets[4], (6007, 1)),
(self.sockets[5], (6007, 2)),
], got_si)
self.assertEqual([
'Started child %d (PID %d) for port %d' % (0, 88, 6006),
'Started child %d (PID %d) for port %d' % (1, 89, 6006),
'Started child %d (PID %d) for port %d' % (2, 90, 6006),
'Started child %d (PID %d) for port %d' % (0, 91, 6007),
'Started child %d (PID %d) for port %d' % (1, 92, 6007),
'Started child %d (PID %d) for port %d' % (2, 93, 6007),
], self.logger.get_lines_for_level('notice'))
self.logger._clear()
# Steady-state...
self.assertEqual([], list(self.strategy.new_worker_socks()))
self.all_bind_ports_for_node.reset_mock()
# Get rid of servers for ports which disappear from the ring
self.ports = (6007,)
self.all_bind_ports_for_node.return_value = set(self.ports)
for s in self.sockets:
s.reset_mock()
with mock.patch('swift.common.wsgi.greenio') as mock_greenio:
self.assertEqual([], list(self.strategy.new_worker_socks()))
self.assertEqual([
mock.call(), # ring_check_interval has passed...
], self.all_bind_ports_for_node.mock_calls)
self.assertEqual([
[mock.call.close()]
for _ in range(3)
], [s.mock_calls for s in self.sockets[:3]])
self.assertEqual({
('shutdown_safe', (self.sockets[0],)),
('shutdown_safe', (self.sockets[1],)),
('shutdown_safe', (self.sockets[2],)),
}, {call[:2] for call in mock_greenio.mock_calls})
self.assertEqual([
[] for _ in range(3)
], [s.mock_calls for s in self.sockets[3:]]) # not closed
self.assertEqual({
'Closing unnecessary sock for port %d (child pid %d)' % (6006, p)
for p in range(88, 91)
}, set(self.logger.get_lines_for_level('notice')))
self.logger._clear()
# Create new socket & workers for new ports that appear in ring
self.ports = (6007, 6009)
self.all_bind_ports_for_node.return_value = set(self.ports)
for s in self.sockets:
s.reset_mock()
self.mock_get_socket.side_effect = Exception('ack')
# But first make sure we handle failure to bind to the requested port!
got_si = []
for s, i in self.strategy.new_worker_socks():
got_si.append((s, i))
self.strategy.register_worker_start(s, i, pid)
pid += 1
self.assertEqual([], got_si)
self.assertEqual([
'Unable to bind to port %d: %s' % (6009, Exception('ack')),
'Unable to bind to port %d: %s' % (6009, Exception('ack')),
'Unable to bind to port %d: %s' % (6009, Exception('ack')),
], self.logger.get_lines_for_level('critical'))
self.logger._clear()
# Will keep trying, so let it succeed again
new_sockets = self.mock_get_socket.side_effect = [
mock.MagicMock() for _ in range(3)]
got_si = []
for s, i in self.strategy.new_worker_socks():
got_si.append((s, i))
self.strategy.register_worker_start(s, i, pid)
pid += 1
self.assertEqual([
(s, (6009, i)) for i, s in enumerate(new_sockets)
], got_si)
self.assertEqual([
'Started child %d (PID %d) for port %d' % (0, 94, 6009),
'Started child %d (PID %d) for port %d' % (1, 95, 6009),
'Started child %d (PID %d) for port %d' % (2, 96, 6009),
], self.logger.get_lines_for_level('notice'))
self.logger._clear()
# Steady-state...
self.assertEqual([], list(self.strategy.new_worker_socks()))
self.all_bind_ports_for_node.reset_mock()
# Restart a guy who died on us
self.strategy.register_worker_exit(95) # server_idx == 1
# TODO: check that the socket got cleaned up
new_socket = mock.MagicMock()
self.mock_get_socket.side_effect = [new_socket]
got_si = []
for s, i in self.strategy.new_worker_socks():
got_si.append((s, i))
self.strategy.register_worker_start(s, i, pid)
pid += 1
self.assertEqual([
(new_socket, (6009, 1)),
], got_si)
self.assertEqual([
'Started child %d (PID %d) for port %d' % (1, 97, 6009),
], self.logger.get_lines_for_level('notice'))
self.logger._clear()
# Check log_sock_exit
self.strategy.log_sock_exit(self.sockets[5], (6007, 2))
self.assertEqual([
'Child %d (PID %d, port %d) exiting normally' % (
2, os.getpid(), 6007),
], self.logger.get_lines_for_level('notice'))
# It's ok to register_worker_exit for a PID that's already had its
# socket closed due to orphaning.
# This is one of the workers for port 6006 that already got reaped.
self.assertIsNone(self.strategy.register_worker_exit(89))
def test_servers_per_port_in_container(self):
# normally there's no configured ring_ip
conf = {
'bind_ip': '1.2.3.4',
}
self.strategy = wsgi.ServersPerPortStrategy(conf, self.logger, 1)
self.assertEqual(self.mock_cache_class.call_args,
mock.call('/etc/swift', '1.2.3.4'))
self.assertEqual({6006, 6007},
self.strategy.cache.all_bind_ports_for_node())
ports = {item[1][0] for item in self.strategy.new_worker_socks()}
self.assertEqual({6006, 6007}, ports)
# but in a container we can override it
conf = {
'bind_ip': '1.2.3.4',
'ring_ip': '2.3.4.5'
}
self.strategy = wsgi.ServersPerPortStrategy(conf, self.logger, 1)
# N.B. our fake BindPortsCache always returns {6006, 6007}, but a real
# BindPortsCache would only return ports for devices that match the ip
# address in the ring
self.assertEqual(self.mock_cache_class.call_args,
mock.call('/etc/swift', '2.3.4.5'))
self.assertEqual({6006, 6007},
self.strategy.cache.all_bind_ports_for_node())
ports = {item[1][0] for item in self.strategy.new_worker_socks()}
self.assertEqual({6006, 6007}, ports)
def test_shutdown_sockets(self):
pid = 88
for s, i in self.strategy.new_worker_socks():
self.strategy.register_worker_start(s, i, pid)
pid += 1
with mock.patch('swift.common.wsgi.greenio') as mock_greenio:
self.strategy.shutdown_sockets()
self.assertEqual([
mock.call.shutdown_safe(s)
for s in self.sockets
], mock_greenio.mock_calls)
self.assertEqual([
[mock.call.close()]
for _ in range(3)
], [s.mock_calls for s in self.sockets[:3]])
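# WorkersStrategy: a single shared listen socket handed to a fixed number of
# workers (workers=2 here); the per-worker index is unused for this strategy.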
class TestWorkersStrategy(unittest.TestCase, CommonTestMixin):
def setUp(self):
self.logger = debug_logger()
self.conf = {
'workers': 2,
'user': 'bob',
}
self.strategy = wsgi.WorkersStrategy(self.conf, self.logger)
self.mock_socket = mock.Mock()
patcher = mock.patch('swift.common.wsgi.get_socket',
return_value=self.mock_socket)
self.mock_get_socket = patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch('swift.common.wsgi.drop_privileges')
self.mock_drop_privileges = patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch('swift.common.wsgi.clean_up_daemon_hygiene')
self.mock_clean_up_daemon_hygene = patcher.start()
self.addCleanup(patcher.stop)
def test_loop_timeout(self):
# This strategy should sit in the green.os.wait() for a bit (to avoid
# busy-waiting) but not forever (so the keep-running flag actually
# gets checked).
self.assertEqual(0.5, self.strategy.loop_timeout())
def test_no_fork_sock(self):
self.assertIsNone(self.strategy.no_fork_sock())
self.conf['workers'] = 0
self.strategy = wsgi.WorkersStrategy(self.conf, self.logger)
self.assertIs(self.mock_socket, self.strategy.no_fork_sock())
def test_new_worker_socks(self):
pid = 88
sock_count = 0
for s, i in self.strategy.new_worker_socks():
self.assertEqual(self.mock_socket, s)
self.assertIsNone(i) # unused for this strategy
self.strategy.register_worker_start(s, 'unused', pid)
pid += 1
sock_count += 1
mypid = os.getpid()
self.assertEqual([
'Started child %s from parent %s' % (88, mypid),
'Started child %s from parent %s' % (89, mypid),
], self.logger.get_lines_for_level('notice'))
self.assertEqual(2, sock_count)
self.assertEqual([], list(self.strategy.new_worker_socks()))
sock_count = 0
self.strategy.register_worker_exit(88)
self.assertEqual([
'Removing dead child %s from parent %s' % (88, mypid)
], self.logger.get_lines_for_level('error'))
for s, i in self.strategy.new_worker_socks():
self.assertEqual(self.mock_socket, s)
self.assertIsNone(i) # unused for this strategy
self.strategy.register_worker_start(s, 'unused', pid)
pid += 1
sock_count += 1
self.assertEqual(1, sock_count)
self.assertEqual([
'Started child %s from parent %s' % (88, mypid),
'Started child %s from parent %s' % (89, mypid),
'Started child %s from parent %s' % (90, mypid),
], self.logger.get_lines_for_level('notice'))
def test_shutdown_sockets(self):
self.mock_get_socket.side_effect = sockets = [
mock.MagicMock(), mock.MagicMock()]
pid = 88
for s, i in self.strategy.new_worker_socks():
self.strategy.register_worker_start(s, 'unused', pid)
pid += 1
with mock.patch('swift.common.wsgi.greenio') as mock_greenio:
self.strategy.shutdown_sockets()
self.assertEqual([
mock.call.shutdown_safe(s)
for s in sockets
], mock_greenio.mock_calls)
self.assertEqual([
[mock.call.close()] for _ in range(2)
], [s.mock_calls for s in sockets])
def test_log_sock_exit(self):
self.strategy.log_sock_exit('blahblah', 'blahblah')
my_pid = os.getpid()
self.assertEqual([
'Child %d exiting normally' % my_pid,
], self.logger.get_lines_for_level('notice'))
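# WSGIContext._app_call runs the wrapped app and captures the status line and
# headers handed to start_response, so subclasses can inspect or rewrite the
# response (e.g. update_content_length) before passing it along.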
class TestWSGIContext(unittest.TestCase):
def test_app_call(self):
statuses = ['200 Ok', '404 Not Found']
def app(env, start_response):
start_response(statuses.pop(0), [('Content-Length', '3')])
yield b'Ok\n'
wc = wsgi.WSGIContext(app)
r = Request.blank('/')
it = wc._app_call(r.environ)
self.assertEqual(wc._response_status, '200 Ok')
self.assertEqual(b''.join(it), b'Ok\n')
r = Request.blank('/')
it = wc._app_call(r.environ)
self.assertEqual(wc._response_status, '404 Not Found')
self.assertEqual(b''.join(it), b'Ok\n')
def test_app_iter_is_closable(self):
def app(env, start_response):
yield b''
yield b''
start_response('200 OK', [('Content-Length', '25')])
yield b'aaaaa'
yield b'bbbbb'
yield b'ccccc'
yield b'ddddd'
yield b'eeeee'
wc = wsgi.WSGIContext(app)
r = Request.blank('/')
iterable = wc._app_call(r.environ)
self.assertEqual(wc._response_status, '200 OK')
iterator = iter(iterable)
self.assertEqual(b'aaaaa', next(iterator))
self.assertEqual(b'bbbbb', next(iterator))
iterable.close()
with self.assertRaises(StopIteration):
next(iterator)
def test_update_content_length(self):
statuses = ['200 Ok']
def app(env, start_response):
start_response(statuses.pop(0), [('Content-Length', '30')])
yield b'Ok\n'
wc = wsgi.WSGIContext(app)
r = Request.blank('/')
it = wc._app_call(r.environ)
wc.update_content_length(35)
self.assertEqual(wc._response_status, '200 Ok')
self.assertEqual(b''.join(it), b'Ok\n')
self.assertEqual(wc._response_headers, [('Content-Length', '35')])
def test_app_returns_headers_as_dict_items(self):
statuses = ['200 Ok']
def app(env, start_response):
start_response(statuses.pop(0), {'Content-Length': '3'}.items())
yield b'Ok\n'
wc = wsgi.WSGIContext(app)
r = Request.blank('/')
it = wc._app_call(r.environ)
wc._response_headers.append(('X-Trans-Id', 'txn'))
self.assertEqual(wc._response_status, '200 Ok')
self.assertEqual(b''.join(it), b'Ok\n')
self.assertEqual(wc._response_headers, [
('Content-Length', '3'),
('X-Trans-Id', 'txn'),
])
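# PipelineWrapper wraps a loaded paste pipeline context so the proxy can
# inspect it (startswith, str()) and splice in new filters by entry point
# name via create_filter / insert_filter.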
class TestPipelineWrapper(unittest.TestCase):
def setUp(self):
config = """
[DEFAULT]
swift_dir = TEMPDIR
[pipeline:main]
pipeline = healthcheck catch_errors tempurl proxy-server
[app:proxy-server]
use = egg:swift#proxy
conn_timeout = 0.2
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:tempurl]
paste.filter_factory = swift.common.middleware.tempurl:filter_factory
"""
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
ctx = wsgi.loadcontext(loadwsgi.APP, conf_file, global_conf={})
self.pipe = wsgi.PipelineWrapper(ctx)
def _entry_point_names(self):
# Helper method to return a list of the entry point names for the
# filters in the pipeline.
return [c.entry_point_name for c in self.pipe.context.filter_contexts]
def test_startswith(self):
self.assertTrue(self.pipe.startswith("healthcheck"))
self.assertFalse(self.pipe.startswith("tempurl"))
def test_startswith_no_filters(self):
config = """
[DEFAULT]
swift_dir = TEMPDIR
[pipeline:main]
pipeline = proxy-server
[app:proxy-server]
use = egg:swift#proxy
conn_timeout = 0.2
"""
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
ctx = wsgi.loadcontext(loadwsgi.APP, conf_file, global_conf={})
pipe = wsgi.PipelineWrapper(ctx)
self.assertTrue(pipe.startswith('proxy'))
def test_insert_filter(self):
original_modules = ['healthcheck', 'catch_errors', None]
self.assertEqual(self._entry_point_names(), original_modules)
self.pipe.insert_filter(self.pipe.create_filter('catch_errors'))
expected_modules = ['catch_errors', 'healthcheck',
'catch_errors', None]
self.assertEqual(self._entry_point_names(), expected_modules)
def test_str(self):
self.assertEqual(
str(self.pipe),
"healthcheck catch_errors tempurl proxy-server")
def test_str_unknown_filter(self):
del self.pipe.context.filter_contexts[0].__dict__['name']
self.pipe.context.filter_contexts[0].object = 'mysterious'
self.assertEqual(
str(self.pipe),
"<unknown> catch_errors tempurl proxy-server")
@patch_policies
class TestPipelineModification(unittest.TestCase):
def pipeline_modules(self, app):
# This is rather brittle; it'll break if a middleware stores its app
# anywhere other than an attribute named "app", but it works for now.
pipe = []
for _ in range(1000):
if app.__class__.__module__ == \
'swift.common.middleware.versioned_writes.legacy':
pipe.append('swift.common.middleware.versioned_writes')
else:
pipe.append(app.__class__.__module__)
if not hasattr(app, 'app'):
break
app = app.app
return pipe
def test_load_app(self):
config = """
[DEFAULT]
swift_dir = TEMPDIR
[pipeline:main]
pipeline = healthcheck proxy-server
[app:proxy-server]
use = egg:swift#proxy
conn_timeout = 0.2
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:healthcheck]
use = egg:swift#healthcheck
"""
def modify_func(app, pipe):
new = pipe.create_filter('catch_errors')
pipe.insert_filter(new)
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
_fake_rings(t)
with mock.patch(
'swift.proxy.server.Application.modify_wsgi_pipeline',
modify_func):
app = wsgi.loadapp(conf_file, global_conf={})
exp = swift.common.middleware.catch_errors.CatchErrorMiddleware
self.assertTrue(isinstance(app, exp), app)
exp = swift.common.middleware.healthcheck.HealthCheckMiddleware
self.assertTrue(isinstance(app.app, exp), app.app)
exp = swift.proxy.server.Application
self.assertTrue(isinstance(app.app.app, exp), app.app.app)
# Everybody gets a reference to the final app, too
self.assertIs(app.app.app, app._pipeline_final_app)
self.assertIs(app.app.app, app._pipeline_request_logging_app)
self.assertIs(app.app.app, app.app._pipeline_final_app)
self.assertIs(app.app.app, app.app._pipeline_request_logging_app)
self.assertIs(app.app.app, app.app.app._pipeline_final_app)
exp_pipeline = [app, app.app, app.app.app]
self.assertEqual(exp_pipeline, app._pipeline)
self.assertEqual(exp_pipeline, app.app._pipeline)
self.assertEqual(exp_pipeline, app.app.app._pipeline)
self.assertIs(app._pipeline, app.app._pipeline)
self.assertIs(app._pipeline, app.app.app._pipeline)
# make sure you can turn off the pipeline modification if you want
def blow_up(*_, **__):
raise self.fail("needs more struts")
with mock.patch(
'swift.proxy.server.Application.modify_wsgi_pipeline',
blow_up):
app = wsgi.loadapp(conf_file, global_conf={},
allow_modify_pipeline=False)
# the pipeline was untouched
exp = swift.common.middleware.healthcheck.HealthCheckMiddleware
self.assertTrue(isinstance(app, exp), app)
exp = swift.proxy.server.Application
self.assertTrue(isinstance(app.app, exp), app.app)
def test_load_app_request_logging_app(self):
config = """
[DEFAULT]
swift_dir = TEMPDIR
[pipeline:main]
pipeline = catch_errors proxy_logging proxy-server
[app:proxy-server]
use = egg:swift#proxy
conn_timeout = 0.2
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:proxy_logging]
use = egg:swift#proxy_logging
"""
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
_fake_rings(t)
app = wsgi.loadapp(conf_file, global_conf={})
self.assertEqual(self.pipeline_modules(app),
['swift.common.middleware.catch_errors',
'swift.common.middleware.gatekeeper',
'swift.common.middleware.proxy_logging',
'swift.common.middleware.listing_formats',
'swift.common.middleware.copy',
'swift.common.middleware.dlo',
'swift.common.middleware.versioned_writes',
'swift.proxy.server'])
pipeline = app._pipeline
logging_app = app._pipeline_request_logging_app
final_app = app._pipeline_final_app
# Sanity check -- loadapp returns the start of the pipeline
self.assertIs(app, pipeline[0])
# ... and the final_app is the end
self.assertIs(final_app, pipeline[-1])
# The logging app is its own special short pipeline
self.assertEqual(self.pipeline_modules(logging_app), [
'swift.common.middleware.proxy_logging',
'swift.proxy.server'])
self.assertNotIn(logging_app, pipeline)
self.assertIs(logging_app.app, final_app)
# All the apps in the main pipeline got decorated identically
for app in pipeline:
self.assertIs(app._pipeline, pipeline)
self.assertIs(app._pipeline_request_logging_app, logging_app)
self.assertIs(app._pipeline_final_app, final_app)
# Special logging app got them, too
self.assertIs(logging_app._pipeline_request_logging_app,
logging_app)
self.assertIs(logging_app._pipeline_final_app, final_app)
# Though the pipeline's different -- may or may not matter?
self.assertEqual(logging_app._pipeline, [logging_app, final_app])
def test_proxy_unmodified_wsgi_pipeline(self):
# Make sure things are sane even when we modify nothing
config = """
[DEFAULT]
swift_dir = TEMPDIR
[pipeline:main]
pipeline = catch_errors gatekeeper proxy-server
[app:proxy-server]
use = egg:swift#proxy
conn_timeout = 0.2
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:gatekeeper]
use = egg:swift#gatekeeper
"""
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
_fake_rings(t)
app = wsgi.loadapp(conf_file, global_conf={})
self.assertEqual(self.pipeline_modules(app),
['swift.common.middleware.catch_errors',
'swift.common.middleware.gatekeeper',
'swift.common.middleware.listing_formats',
'swift.common.middleware.copy',
'swift.common.middleware.dlo',
'swift.common.middleware.versioned_writes',
'swift.proxy.server'])
def test_proxy_modify_wsgi_pipeline(self):
config = """
[DEFAULT]
swift_dir = TEMPDIR
[pipeline:main]
pipeline = healthcheck proxy-server
[app:proxy-server]
use = egg:swift#proxy
conn_timeout = 0.2
[filter:healthcheck]
use = egg:swift#healthcheck
"""
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
_fake_rings(t)
app = wsgi.loadapp(conf_file, global_conf={})
self.assertEqual(self.pipeline_modules(app),
['swift.common.middleware.catch_errors',
'swift.common.middleware.gatekeeper',
'swift.common.middleware.listing_formats',
'swift.common.middleware.copy',
'swift.common.middleware.dlo',
'swift.common.middleware.versioned_writes',
'swift.common.middleware.healthcheck',
'swift.proxy.server'])
def test_proxy_modify_wsgi_pipeline_recommended_pipelines(self):
to_test = [
# Version, filter-only pipeline, expected final pipeline
('1.4.1',
'catch_errors healthcheck cache ratelimit tempauth',
'catch_errors gatekeeper healthcheck memcache'
' listing_formats ratelimit tempauth copy dlo versioned_writes'),
('1.5.0',
'catch_errors healthcheck cache ratelimit tempauth proxy-logging',
'catch_errors gatekeeper healthcheck memcache ratelimit tempauth'
' proxy_logging listing_formats copy dlo versioned_writes'),
('1.8.0',
'catch_errors healthcheck proxy-logging cache slo ratelimit'
' tempauth container-quotas account-quotas proxy-logging',
'catch_errors gatekeeper healthcheck proxy_logging memcache'
' listing_formats slo ratelimit tempauth copy dlo'
' versioned_writes container_quotas account_quotas'
' proxy_logging'),
('1.9.1',
'catch_errors healthcheck proxy-logging cache bulk slo ratelimit'
' tempauth container-quotas account-quotas proxy-logging',
'catch_errors gatekeeper healthcheck proxy_logging memcache'
' listing_formats bulk slo ratelimit tempauth copy dlo'
' versioned_writes container_quotas account_quotas'
' proxy_logging'),
('1.12.0',
'catch_errors gatekeeper healthcheck proxy-logging cache'
' container_sync bulk slo ratelimit tempauth container-quotas'
' account-quotas proxy-logging',
'catch_errors gatekeeper healthcheck proxy_logging memcache'
' listing_formats container_sync bulk slo ratelimit tempauth'
' copy dlo versioned_writes container_quotas account_quotas'
' proxy_logging'),
('1.13.0',
'catch_errors gatekeeper healthcheck proxy-logging cache'
' container_sync bulk slo dlo ratelimit tempauth'
' container-quotas account-quotas proxy-logging',
'catch_errors gatekeeper healthcheck proxy_logging memcache'
' listing_formats container_sync bulk slo dlo ratelimit'
' tempauth copy versioned_writes container_quotas account_quotas'
' proxy_logging'),
('1.13.1',
'catch_errors gatekeeper healthcheck proxy-logging cache'
' container_sync bulk tempurl slo dlo ratelimit tempauth'
' container-quotas account-quotas proxy-logging',
'catch_errors gatekeeper healthcheck proxy_logging memcache'
' listing_formats container_sync bulk tempurl slo dlo ratelimit'
' tempauth copy versioned_writes container_quotas account_quotas'
' proxy_logging'),
('2.0.0',
'catch_errors gatekeeper healthcheck proxy-logging cache'
' container_sync bulk tempurl ratelimit tempauth container-quotas'
' account-quotas slo dlo proxy-logging',
'catch_errors gatekeeper healthcheck proxy_logging memcache'
' listing_formats container_sync bulk tempurl ratelimit tempauth'
' copy container_quotas account_quotas slo dlo versioned_writes'
' proxy_logging'),
('2.4.0',
'catch_errors gatekeeper healthcheck proxy-logging cache'
' container_sync bulk tempurl ratelimit tempauth container-quotas'
' account-quotas slo dlo versioned_writes proxy-logging',
'catch_errors gatekeeper healthcheck proxy_logging memcache'
' listing_formats container_sync bulk tempurl ratelimit tempauth'
' copy container_quotas account_quotas slo dlo versioned_writes'
' proxy_logging'),
('2.8.0',
'catch_errors gatekeeper healthcheck proxy-logging cache'
' container_sync bulk tempurl ratelimit tempauth copy'
' container-quotas account-quotas slo dlo versioned_writes'
' proxy-logging',
'catch_errors gatekeeper healthcheck proxy_logging memcache'
' listing_formats container_sync bulk tempurl ratelimit tempauth'
' copy container_quotas account_quotas slo dlo versioned_writes'
' proxy_logging'),
('2.16.0',
'catch_errors gatekeeper healthcheck proxy-logging cache'
' listing_formats container_sync bulk tempurl ratelimit'
' tempauth copy container-quotas account-quotas slo dlo'
' versioned_writes proxy-logging',
'catch_errors gatekeeper healthcheck proxy_logging memcache'
' listing_formats container_sync bulk tempurl ratelimit'
' tempauth copy container_quotas account_quotas slo dlo'
' versioned_writes proxy_logging'),
]
config = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = %s proxy-server
[app:proxy-server]
use = egg:swift#proxy
conn_timeout = 0.2
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:gatekeeper]
use = egg:swift#gatekeeper
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:proxy-logging]
use = egg:swift#proxy_logging
[filter:cache]
use = egg:swift#memcache
[filter:listing_formats]
use = egg:swift#listing_formats
[filter:container_sync]
use = egg:swift#container_sync
[filter:bulk]
use = egg:swift#bulk
[filter:tempurl]
use = egg:swift#tempurl
[filter:ratelimit]
use = egg:swift#ratelimit
[filter:tempauth]
use = egg:swift#tempauth
user_test_tester = t%%sting .admin
[filter:copy]
use = egg:swift#copy
[filter:container-quotas]
use = egg:swift#container_quotas
[filter:account-quotas]
use = egg:swift#account_quotas
[filter:slo]
use = egg:swift#slo
[filter:dlo]
use = egg:swift#dlo
[filter:versioned_writes]
use = egg:swift#versioned_writes
"""
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
_fake_rings(t)
for version, pipeline, expected in to_test:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
to_write = contents % (t, pipeline)
# Sanity check that the password only has one % in it
self.assertIn('t%sting', to_write)
f.write(to_write)
app = wsgi.loadapp(conf_file, global_conf={})
actual = ' '.join(m.rsplit('.', 1)[1]
for m in self.pipeline_modules(app)[:-1])
self.assertEqual(
expected, actual,
'Pipeline mismatch for version %s: got\n %s\n'
'but expected\n %s' % (version, actual, expected))
def test_proxy_modify_wsgi_pipeline_inserts_versioned_writes(self):
config = """
[DEFAULT]
swift_dir = TEMPDIR
[pipeline:main]
pipeline = slo dlo healthcheck proxy-server
[app:proxy-server]
use = egg:swift#proxy
conn_timeout = 0.2
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:dlo]
use = egg:swift#dlo
[filter:slo]
use = egg:swift#slo
"""
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
_fake_rings(t)
app = wsgi.loadapp(conf_file, global_conf={})
self.assertEqual(self.pipeline_modules(app),
['swift.common.middleware.catch_errors',
'swift.common.middleware.gatekeeper',
'swift.common.middleware.listing_formats',
'swift.common.middleware.copy',
'swift.common.middleware.slo',
'swift.common.middleware.dlo',
'swift.common.middleware.versioned_writes',
'swift.common.middleware.healthcheck',
'swift.proxy.server'])
def test_proxy_modify_wsgi_pipeline_ordering(self):
config = """
[DEFAULT]
swift_dir = TEMPDIR
[pipeline:main]
pipeline = healthcheck proxy-logging bulk tempurl proxy-server
[app:proxy-server]
use = egg:swift#proxy
conn_timeout = 0.2
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:proxy-logging]
use = egg:swift#proxy_logging
[filter:bulk]
use = egg:swift#bulk
[filter:tempurl]
use = egg:swift#tempurl
"""
new_req_filters = [
# not in pipeline, no afters
{'name': 'catch_errors'},
# already in pipeline
{'name': 'proxy_logging',
'after_fn': lambda _: ['catch_errors']},
# not in pipeline, comes after more than one thing
{'name': 'container_quotas',
'after_fn': lambda _: ['catch_errors', 'bulk']}]
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
_fake_rings(t)
with mock.patch.object(swift.proxy.server, 'required_filters',
new_req_filters):
app = wsgi.loadapp(conf_file, global_conf={})
self.assertEqual(self.pipeline_modules(app), [
'swift.common.middleware.catch_errors',
'swift.common.middleware.healthcheck',
'swift.common.middleware.proxy_logging',
'swift.common.middleware.bulk',
'swift.common.middleware.container_quotas',
'swift.common.middleware.tempurl',
'swift.proxy.server'])
def _proxy_modify_wsgi_pipeline(self, pipe):
config = """
[DEFAULT]
swift_dir = TEMPDIR
[pipeline:main]
pipeline = %s
[app:proxy-server]
use = egg:swift#proxy
conn_timeout = 0.2
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:gatekeeper]
use = egg:swift#gatekeeper
"""
config = config % (pipe,)
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
_fake_rings(t)
app = wsgi.loadapp(conf_file, global_conf={})
return app
def test_gatekeeper_insertion_catch_errors_configured_at_start(self):
# catch_errors is configured at start, gatekeeper is not configured,
# so gatekeeper should be inserted just after catch_errors
pipe = 'catch_errors healthcheck proxy-server'
app = self._proxy_modify_wsgi_pipeline(pipe)
self.assertEqual(self.pipeline_modules(app), [
'swift.common.middleware.catch_errors',
'swift.common.middleware.gatekeeper',
'swift.common.middleware.listing_formats',
'swift.common.middleware.copy',
'swift.common.middleware.dlo',
'swift.common.middleware.versioned_writes',
'swift.common.middleware.healthcheck',
'swift.proxy.server'])
def test_gatekeeper_insertion_catch_errors_configured_not_at_start(self):
# catch_errors is configured, gatekeeper is not configured, so
# gatekeeper should be inserted at start of pipeline
pipe = 'healthcheck catch_errors proxy-server'
app = self._proxy_modify_wsgi_pipeline(pipe)
self.assertEqual(self.pipeline_modules(app), [
'swift.common.middleware.gatekeeper',
'swift.common.middleware.healthcheck',
'swift.common.middleware.catch_errors',
'swift.common.middleware.listing_formats',
'swift.common.middleware.copy',
'swift.common.middleware.dlo',
'swift.common.middleware.versioned_writes',
'swift.proxy.server'])
def test_catch_errors_gatekeeper_configured_not_at_start(self):
# catch_errors is configured, gatekeeper is configured, so
# no change should be made to pipeline
pipe = 'healthcheck catch_errors gatekeeper proxy-server'
app = self._proxy_modify_wsgi_pipeline(pipe)
self.assertEqual(self.pipeline_modules(app), [
'swift.common.middleware.healthcheck',
'swift.common.middleware.catch_errors',
'swift.common.middleware.gatekeeper',
'swift.common.middleware.listing_formats',
'swift.common.middleware.copy',
'swift.common.middleware.dlo',
'swift.common.middleware.versioned_writes',
'swift.proxy.server'])
@with_tempdir
def test_loadapp_proxy(self, tempdir):
conf_path = os.path.join(tempdir, 'proxy-server.conf')
conf_body = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
""" % tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
_fake_rings(tempdir)
account_ring_path = os.path.join(tempdir, 'account.ring.gz')
container_ring_path = os.path.join(tempdir, 'container.ring.gz')
object_ring_paths = {}
for policy in POLICIES:
object_ring_paths[int(policy)] = os.path.join(
tempdir, policy.ring_name + '.ring.gz')
app = wsgi.loadapp(conf_path)
proxy_app = app._pipeline_final_app
self.assertEqual(proxy_app.account_ring.serialized_path,
account_ring_path)
self.assertEqual(proxy_app.container_ring.serialized_path,
container_ring_path)
for policy_index, expected_path in object_ring_paths.items():
object_ring = proxy_app.get_object_ring(policy_index)
self.assertEqual(expected_path, object_ring.serialized_path)
@with_tempdir
def test_loadapp_storage(self, tempdir):
expectations = {
'object': obj_server.ObjectController,
'container': container_server.ContainerController,
'account': account_server.AccountController,
}
for server_type, controller in expectations.items():
conf_path = os.path.join(
tempdir, '%s-server.conf' % server_type)
conf_body = """
[DEFAULT]
swift_dir = %s
[app:main]
use = egg:swift#%s
""" % (tempdir, server_type)
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
app = wsgi.loadapp(conf_path)
self.assertTrue(isinstance(app, controller))
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/test_wsgi.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from test.unit import temptree
import mock
import os
import sys
import resource
import signal
import errno
from collections import defaultdict
from time import sleep, time
import tempfile
from six.moves import reload_module
from swift.common import manager
from swift.common.exceptions import InvalidPidFileException
import eventlet
threading = eventlet.patcher.original('threading')
DUMMY_SIG = 1
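# MockOs stands in for the os module in these manager tests: kill() records
# the signals delivered to each pid in running_pids, raises 'No such process'
# for unknown pids and EPERM for RAISE_EPERM_SIG; everything else falls
# through to the real os module via __getattr__.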
class MockOs(object):
RAISE_EPERM_SIG = 99
def __init__(self, pids):
self.running_pids = pids
self.pid_sigs = defaultdict(list)
self.closed_fds = []
self.child_pid = 9999 # fork defaults to test parent process path
self.execlp_called = False
def kill(self, pid, sig):
if sig == self.RAISE_EPERM_SIG:
raise OSError(errno.EPERM, 'Operation not permitted')
if pid not in self.running_pids:
raise OSError(3, 'No such process')
self.pid_sigs[pid].append(sig)
def __getattr__(self, name):
# I only override portions of the os module
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(os, name)
def pop_stream(f):
"""read everything out of file from the top and clear it out
"""
f.flush()
f.seek(0)
output = f.read()
f.seek(0)
f.truncate()
return output
class TestManagerModule(unittest.TestCase):
def test_servers(self):
main_plus_rest = set(manager.MAIN_SERVERS + manager.REST_SERVERS)
self.assertEqual(set(manager.ALL_SERVERS), main_plus_rest)
# make sure there's no server listed in both
self.assertEqual(len(main_plus_rest), len(manager.MAIN_SERVERS) +
len(manager.REST_SERVERS))
def test_setup_env(self):
class MockResource(object):
def __init__(self, error=None):
self.error = error
self.called_with_args = []
def setrlimit(self, resource, limits):
if self.error:
raise self.error
self.called_with_args.append((resource, limits))
def __getattr__(self, name):
# I only override portions of the resource module
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(resource, name)
_orig_resource = manager.resource
_orig_environ = os.environ
try:
manager.resource = MockResource()
manager.os.environ = {}
manager.setup_env()
expected = [
(resource.RLIMIT_NOFILE, (manager.MAX_DESCRIPTORS,
manager.MAX_DESCRIPTORS)),
(resource.RLIMIT_DATA, (manager.MAX_MEMORY,
manager.MAX_MEMORY)),
(resource.RLIMIT_NPROC, (manager.MAX_PROCS,
manager.MAX_PROCS)),
]
self.assertEqual(manager.resource.called_with_args, expected)
self.assertTrue(
manager.os.environ['PYTHON_EGG_CACHE'].startswith(
tempfile.gettempdir()))
# test error condition
manager.resource = MockResource(error=ValueError())
manager.os.environ = {}
manager.setup_env()
self.assertEqual(manager.resource.called_with_args, [])
self.assertTrue(
manager.os.environ['PYTHON_EGG_CACHE'].startswith(
tempfile.gettempdir()))
manager.resource = MockResource(error=OSError())
manager.os.environ = {}
self.assertRaises(OSError, manager.setup_env)
self.assertIsNone(manager.os.environ.get('PYTHON_EGG_CACHE'))
finally:
manager.resource = _orig_resource
os.environ = _orig_environ
def test_command_wrapper(self):
class MockManager(object):
def __init__(self, servers_):
self.servers = [manager.Server(server) for server in servers_]
@manager.command
def myfunc(self, arg1):
"""test doc
"""
return arg1
m = MockManager(['test'])
self.assertEqual(m.myfunc.__doc__.strip(), 'test doc')
self.assertEqual(m.myfunc(1), 1)
self.assertEqual(m.myfunc(0), 0)
self.assertEqual(m.myfunc(True), 1)
self.assertEqual(m.myfunc(False), 0)
self.assertTrue(hasattr(m.myfunc, 'publicly_accessible'))
self.assertTrue(m.myfunc.publicly_accessible)
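# watch_server_pids test harness: MockTime advances a fixed tick per call so
# the polling interval elapses deterministically, MockOs.waitpid replays a
# scripted sequence of results per pid, and MockServer keeps reporting its
# pids for `zombie` polls before claiming they are gone.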
def test_watch_server_pids(self):
class MockOs(object):
WNOHANG = os.WNOHANG
def __init__(self, pid_map=None):
if pid_map is None:
pid_map = {}
self.pid_map = {}
for pid, v in pid_map.items():
self.pid_map[pid] = (x for x in v)
def waitpid(self, pid, options):
try:
rv = next(self.pid_map[pid])
except StopIteration:
raise OSError(errno.ECHILD, os.strerror(errno.ECHILD))
except KeyError:
raise OSError(errno.ESRCH, os.strerror(errno.ESRCH))
if isinstance(rv, Exception):
raise rv
else:
return rv
class MockTime(object):
def __init__(self, ticks=None):
self.tock = time()
if not ticks:
ticks = []
self.ticks = (t for t in ticks)
def time(self):
try:
self.tock += next(self.ticks)
except StopIteration:
self.tock += 1
return self.tock
def sleep(*args):
return
class MockServer(object):
def __init__(self, pids, run_dir=manager.RUN_DIR, zombie=0):
self.heartbeat = (pids for _ in range(zombie))
def get_running_pids(self):
try:
rv = next(self.heartbeat)
return rv
except StopIteration:
return {}
_orig_os = manager.os
_orig_time = manager.time
_orig_server = manager.Server
try:
manager.time = MockTime()
manager.os = MockOs()
# this server always says it's dead when you ask for running pids
server = MockServer([1])
# list of pids keyed on servers to watch
server_pids = {
server: [1],
}
# basic test, server dies
gen = manager.watch_server_pids(server_pids)
expected = [(server, 1)]
self.assertEqual([x for x in gen], expected)
# start long running server and short interval
server = MockServer([1], zombie=15)
server_pids = {
server: [1],
}
gen = manager.watch_server_pids(server_pids)
self.assertEqual([x for x in gen], [])
# wait a little longer
gen = manager.watch_server_pids(server_pids, interval=15)
self.assertEqual([x for x in gen], [(server, 1)])
# zombie process
server = MockServer([1], zombie=200)
server_pids = {
server: [1],
}
# test weird os error
manager.os = MockOs({1: [OSError()]})
gen = manager.watch_server_pids(server_pids)
self.assertRaises(OSError, lambda: [x for x in gen])
# test multi-server
server1 = MockServer([1, 10], zombie=200)
server2 = MockServer([2, 20], zombie=8)
server_pids = {
server1: [1, 10],
server2: [2, 20],
}
pid_map = {
1: [None for _ in range(10)],
2: [None for _ in range(8)],
20: [None for _ in range(4)],
}
manager.os = MockOs(pid_map)
gen = manager.watch_server_pids(server_pids,
interval=manager.KILL_WAIT)
expected = [
(server2, 2),
(server2, 20),
]
self.assertEqual([x for x in gen], expected)
finally:
manager.os = _orig_os
manager.time = _orig_time
manager.Server = _orig_server
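# safe_kill checks /proc/<pid>/cmdline against the expected process name
# before sending SIG_DFL (raising InvalidPidFileException on a mismatch);
# pids with no cmdline entry are not rejected, and other signals such as
# SIGHUP are sent without the name check.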
def test_safe_kill(self):
manager.os = MockOs([1, 2, 3, 4])
proc_files = (
('1/cmdline', 'same-procname'),
('2/cmdline', 'another-procname'),
('4/cmdline', 'another-procname'),
)
files, contents = zip(*proc_files)
with temptree(files, contents) as t:
manager.PROC_DIR = t
manager.safe_kill(1, signal.SIG_DFL, 'same-procname')
self.assertRaises(InvalidPidFileException, manager.safe_kill,
2, signal.SIG_DFL, 'same-procname')
manager.safe_kill(3, signal.SIG_DFL, 'same-procname')
manager.safe_kill(4, signal.SIGHUP, 'same-procname')
def test_exc(self):
self.assertTrue(issubclass(manager.UnknownCommandError, Exception))
def test_format_server_name(self):
self.assertEqual(
manager.format_server_name('proxy'),
("proxy-server", "swift-proxy-server"))
self.assertEqual(
manager.format_server_name('Proxy'),
("Proxy-server", "swift-Proxy-server"))
self.assertEqual(
manager.format_server_name(''),
("-server", "swift--server"))
def test_verify_server(self):
def mock_find_exe(f):
# pretend that swift-object-server is the only file on path
return f if f == 'swift-object-server' else None
with mock.patch('swift.common.manager.find_executable',
side_effect=mock_find_exe):
# test valid servers
self.assertTrue(manager.verify_server('object'))
self.assertTrue(manager.verify_server('object-server'))
self.assertTrue(manager.verify_server('object.replication'))
self.assertTrue(manager.verify_server('object-server.1'))
# test invalid servers
self.assertFalse(manager.verify_server('test'))
self.assertFalse(manager.verify_server('test-server'))
self.assertFalse(manager.verify_server('ls'))
self.assertFalse(manager.verify_server(''))
self.assertFalse(manager.verify_server('Object'))
self.assertFalse(manager.verify_server('object1'))
self.assertFalse(manager.verify_server(None))
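# TestServer covers the Server helper: name normalization, mapping between
# conf files under SWIFT_DIR and pid files under RUN_DIR, and conf-file
# discovery/grouping per server type.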
class TestServer(unittest.TestCase):
def tearDown(self):
reload_module(manager)
def join_swift_dir(self, path):
return os.path.join(manager.SWIFT_DIR, path)
def join_run_dir(self, path):
return os.path.join(manager.RUN_DIR, path)
def test_create_server(self):
server = manager.Server('proxy')
self.assertEqual(server.server, 'proxy-server')
self.assertEqual(server.type, 'proxy')
self.assertEqual(server.cmd, 'swift-proxy-server')
server = manager.Server('object-replicator')
self.assertEqual(server.server, 'object-replicator')
self.assertEqual(server.type, 'object')
self.assertEqual(server.cmd, 'swift-object-replicator')
def test_server_to_string(self):
server = manager.Server('Proxy')
self.assertEqual(str(server), 'proxy-server')
server = manager.Server('object-replicator')
self.assertEqual(str(server), 'object-replicator')
def test_server_repr(self):
server = manager.Server('proxy')
self.assertTrue(server.__class__.__name__ in repr(server))
self.assertTrue(str(server) in repr(server))
def test_server_equality(self):
server1 = manager.Server('Proxy')
server2 = manager.Server('proxy-server')
self.assertEqual(server1, server2)
# it is NOT a string
self.assertNotEqual(server1, 'proxy-server')
def test_get_pid_file_name(self):
server = manager.Server('proxy')
conf_file = self.join_swift_dir('proxy-server.conf')
pid_file = self.join_run_dir('proxy-server.pid')
self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
server = manager.Server('object-replicator')
conf_file = self.join_swift_dir('object-server/1.conf')
pid_file = self.join_run_dir('object-replicator/1.pid')
self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
server = manager.Server('container-auditor')
conf_file = self.join_swift_dir(
'container-server/1/container-auditor.conf')
pid_file = self.join_run_dir(
'container-auditor/1/container-auditor.pid')
self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
def test_get_custom_pid_file_name(self):
random_run_dir = "/random/dir"
get_random_run_dir = lambda x: os.path.join(random_run_dir, x)
server = manager.Server('proxy', run_dir=random_run_dir)
conf_file = self.join_swift_dir('proxy-server.conf')
pid_file = get_random_run_dir('proxy-server.pid')
self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
server = manager.Server('object-replicator', run_dir=random_run_dir)
conf_file = self.join_swift_dir('object-server/1.conf')
pid_file = get_random_run_dir('object-replicator/1.pid')
self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
server = manager.Server('container-auditor', run_dir=random_run_dir)
conf_file = self.join_swift_dir(
'container-server/1/container-auditor.conf')
pid_file = get_random_run_dir(
'container-auditor/1/container-auditor.pid')
self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
def test_get_conf_file_name(self):
server = manager.Server('proxy')
conf_file = self.join_swift_dir('proxy-server.conf')
pid_file = self.join_run_dir('proxy-server.pid')
self.assertEqual(conf_file, server.get_conf_file_name(pid_file))
server = manager.Server('object-replicator')
conf_file = self.join_swift_dir('object-server/1.conf')
pid_file = self.join_run_dir('object-replicator/1.pid')
self.assertEqual(conf_file, server.get_conf_file_name(pid_file))
server = manager.Server('container-auditor')
conf_file = self.join_swift_dir(
'container-server/1/container-auditor.conf')
pid_file = self.join_run_dir(
'container-auditor/1/container-auditor.pid')
self.assertEqual(conf_file, server.get_conf_file_name(pid_file))
server_name = manager.STANDALONE_SERVERS[0]
server = manager.Server(server_name)
conf_file = self.join_swift_dir(server_name + '.conf')
pid_file = self.join_run_dir(server_name + '.pid')
self.assertEqual(conf_file, server.get_conf_file_name(pid_file))
def test_conf_files(self):
# test get single conf file
conf_files = (
'proxy-server.conf',
'proxy-server.ini',
'auth-server.conf',
)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
server = manager.Server('proxy')
conf_files = server.conf_files()
self.assertEqual(len(conf_files), 1)
conf_file = conf_files[0]
proxy_conf = self.join_swift_dir('proxy-server.conf')
self.assertEqual(conf_file, proxy_conf)
# test multi server conf files & grouping of server-type config
conf_files = (
'object-server1.conf',
'object-server/2.conf',
'object-server/object3.conf',
'object-server/conf/server4.conf',
'object-server.txt',
'proxy-server.conf',
)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
server = manager.Server('object-replicator')
conf_files = server.conf_files()
self.assertEqual(len(conf_files), 4)
c1 = self.join_swift_dir('object-server1.conf')
c2 = self.join_swift_dir('object-server/2.conf')
c3 = self.join_swift_dir('object-server/object3.conf')
c4 = self.join_swift_dir('object-server/conf/server4.conf')
for c in [c1, c2, c3, c4]:
self.assertTrue(c in conf_files)
# test configs returned sorted
sorted_confs = sorted([c1, c2, c3, c4])
self.assertEqual(conf_files, sorted_confs)
# test get single numbered conf
conf_files = (
'account-server/1.conf',
'account-server/2.conf',
'account-server/3.conf',
'account-server/4.conf',
)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
server = manager.Server('account')
conf_files = server.conf_files(number=2)
self.assertEqual(len(conf_files), 1)
conf_file = conf_files[0]
self.assertEqual(conf_file,
self.join_swift_dir('account-server/2.conf'))
# test missing config number
conf_files = server.conf_files(number=5)
self.assertFalse(conf_files)
# test getting specific conf
conf_files = (
'account-server/1.conf',
'account-server/2.conf',
'account-server/3.conf',
'account-server/4.conf',
)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
server = manager.Server('account.2')
conf_files = server.conf_files()
self.assertEqual(len(conf_files), 1)
conf_file = conf_files[0]
self.assertEqual(conf_file,
self.join_swift_dir('account-server/2.conf'))
# test verbose & quiet
conf_files = (
'auth-server.ini',
'container-server/1.conf',
)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
old_stdout = sys.stdout
try:
with open(os.path.join(t, 'output'), 'w+') as f:
sys.stdout = f
server = manager.Server('auth')
# check warn "unable to locate"
conf_files = server.conf_files()
self.assertFalse(conf_files)
self.assertTrue('unable to locate config for auth'
in pop_stream(f).lower())
# check quiet will silence warning
conf_files = server.conf_files(verbose=True, quiet=True)
self.assertEqual(pop_stream(f), '')
# check found config no warning
server = manager.Server('container-auditor')
conf_files = server.conf_files()
self.assertEqual(pop_stream(f), '')
# check missing config number warn "unable to locate"
conf_files = server.conf_files(number=2)
self.assertTrue(
'unable to locate config number 2 for ' +
'container-auditor' in pop_stream(f).lower())
# check verbose lists configs
conf_files = server.conf_files(number=2, verbose=True)
c1 = self.join_swift_dir('container-server/1.conf')
self.assertIn(c1, pop_stream(f))
finally:
sys.stdout = old_stdout
# test standalone conf file
server_name = manager.STANDALONE_SERVERS[0]
conf_files = (server_name + '.conf',)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
server = manager.Server(server_name)
conf_files = server.conf_files()
self.assertEqual(len(conf_files), 1)
conf_file = conf_files[0]
conf = self.join_swift_dir(server_name + '.conf')
self.assertEqual(conf_file, conf)
def _test_expirer_conf_files(self, files_and_contents, expected_files):
files, contents = zip(*files_and_contents)
with temptree(files, contents) as t:
manager.SWIFT_DIR = t
expected_files = [self.join_swift_dir(f) for f in expected_files]
def assert_results(quiet, verbose):
original_stdout = sys.stdout
try:
with open(os.path.join(t, 'output'), 'w+') as stdout:
sys.stdout = stdout
server = manager.Server('object-expirer')
conf_files = server.conf_files(verbose=verbose,
quiet=quiet)
messages = pop_stream(stdout)
finally:
sys.stdout = original_stdout
self.assertEqual(conf_files, expected_files)
if any(["expirer" in f for f in expected_files]) and not quiet:
self.assertIn(
"object-expirer.conf is deprecated.", messages)
if verbose:
for f in expected_files:
self.assertIn(f, messages)
elif not expected_files and not quiet:
self.assertIn("Unable to locate config", messages)
else:
self.assertEqual(messages, "")
assert_results(quiet=True, verbose=False)
assert_results(quiet=False, verbose=False)
assert_results(quiet=False, verbose=True)
def test_expirer_conf_files(self):
self._test_expirer_conf_files(
[('object-expirer.conf', '')], ['object-expirer.conf'])
self._test_expirer_conf_files(
[('object-server.conf', '')], [])
self._test_expirer_conf_files(
[('object-server.conf', '[object-expirer]')],
['object-server.conf'])
self._test_expirer_conf_files([
('object-server/1.conf', ''),
('object-server/2.conf', ''),
('object-server/3.conf', ''),
('object-server/4.conf', ''),
], [])
self._test_expirer_conf_files([
('object-server/1.conf', '[object-expirer]'),
('object-server/2.conf', ''),
('object-server/3.conf', ''),
('object-server/4.conf', ''),
], ['object-server/1.conf'])
self._test_expirer_conf_files([
('object-server/1.conf', '[object-expirer]'),
('object-server/2.conf', '[object-expirer]'),
('object-server/3.conf', '[object-expirer]'),
('object-server/4.conf', '[object-expirer]'),
], [
'object-server/1.conf',
'object-server/2.conf',
'object-server/3.conf',
'object-server/4.conf',
])
self._test_expirer_conf_files([
('object-server.conf', ''),
('object-expirer.conf', ''),
], ['object-expirer.conf'])
self._test_expirer_conf_files([
('object-server.conf', '[object-expirer]'),
('object-expirer.conf', ''),
], ['object-server.conf'])
self._test_expirer_conf_files([
('object-server/1.conf', ''),
('object-server/2.conf', ''),
('object-server/3.conf', ''),
('object-server/4.conf', ''),
('object-expirer.conf', ''),
], ['object-expirer.conf'])
self._test_expirer_conf_files([
('object-server/1.conf', '[object-expirer]'),
('object-server/2.conf', ''),
('object-server/3.conf', ''),
('object-server/4.conf', ''),
('object-expirer.conf', ''),
], ['object-server/1.conf'])
self._test_expirer_conf_files([
('object-server/1.conf', '[object-expirer]'),
('object-server/2.conf', '[object-expirer]'),
('object-server/3.conf', '[object-expirer]'),
('object-server/4.conf', '[object-expirer]'),
('object-expirer.conf', ''),
], [
'object-server/1.conf',
'object-server/2.conf',
'object-server/3.conf',
'object-server/4.conf',
])
self._test_expirer_conf_files([
('object-server/1.conf.d/20_setting.conf', '[object-expirer]'),
('object-server/2.conf.d/20_setting.conf', '[object-expirer]'),
('object-server/3.conf.d/20_setting.conf', '[object-expirer]'),
('object-server/4.conf.d/20_setting.conf', '[object-expirer]'),
], [
'object-server/1.conf.d',
'object-server/2.conf.d',
'object-server/3.conf.d',
'object-server/4.conf.d',
])
def test_proxy_conf_dir(self):
conf_files = (
'proxy-server.conf.d/00.conf',
'proxy-server.conf.d/01.conf',
)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
server = manager.Server('proxy')
conf_dirs = server.conf_files()
self.assertEqual(len(conf_dirs), 1)
conf_dir = conf_dirs[0]
proxy_conf_dir = self.join_swift_dir('proxy-server.conf.d')
self.assertEqual(proxy_conf_dir, conf_dir)
def test_named_conf_dir(self):
conf_files = (
'object-server/base.conf-template',
'object-server/object-server.conf.d/00_base.conf',
'object-server/object-server.conf.d/10_server.conf',
'object-server/object-replication.conf.d/00_base.conf',
'object-server/object-replication.conf.d/10_server.conf',
)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
server = manager.Server('object.replication')
conf_dirs = server.conf_files()
self.assertEqual(len(conf_dirs), 1)
conf_dir = conf_dirs[0]
replication_server_conf_dir = self.join_swift_dir(
'object-server/object-replication.conf.d')
self.assertEqual(replication_server_conf_dir, conf_dir)
# and again with no named filter
server = manager.Server('object')
conf_dirs = server.conf_files()
self.assertEqual(len(conf_dirs), 2)
for named_conf in ('server', 'replication'):
conf_dir = self.join_swift_dir(
'object-server/object-%s.conf.d' % named_conf)
self.assertTrue(conf_dir in conf_dirs)
def test_conf_dir(self):
conf_files = (
'object-server/object-server.conf-base',
'object-server/1.conf.d/base.conf',
'object-server/1.conf.d/1.conf',
'object-server/2.conf.d/base.conf',
'object-server/2.conf.d/2.conf',
'object-server/3.conf.d/base.conf',
'object-server/3.conf.d/3.conf',
'object-server/4.conf.d/base.conf',
'object-server/4.conf.d/4.conf',
)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
server = manager.Server('object-replicator')
conf_dirs = server.conf_files()
self.assertEqual(len(conf_dirs), 4)
c1 = self.join_swift_dir('object-server/1.conf.d')
c2 = self.join_swift_dir('object-server/2.conf.d')
c3 = self.join_swift_dir('object-server/3.conf.d')
c4 = self.join_swift_dir('object-server/4.conf.d')
for c in [c1, c2, c3, c4]:
self.assertTrue(c in conf_dirs)
# test configs returned sorted
sorted_confs = sorted([c1, c2, c3, c4])
self.assertEqual(conf_dirs, sorted_confs)
def test_named_conf_dir_pid_files(self):
conf_files = (
'object-server/object-server.pid.d',
'object-server/object-replication.pid.d',
)
with temptree(conf_files) as t:
manager.RUN_DIR = t
server = manager.Server('object.replication', run_dir=t)
pid_files = server.pid_files()
self.assertEqual(len(pid_files), 1)
pid_file = pid_files[0]
replication_server_pid = self.join_run_dir(
'object-server/object-replication.pid.d')
self.assertEqual(replication_server_pid, pid_file)
# and again with no named filter
server = manager.Server('object', run_dir=t)
pid_files = server.pid_files()
self.assertEqual(len(pid_files), 2)
for named_pid in ('server', 'replication'):
pid_file = self.join_run_dir(
'object-server/object-%s.pid.d' % named_pid)
self.assertTrue(pid_file in pid_files)
def test_iter_pid_files(self):
"""
        Server.iter_pid_files is fairly simple, so exercise the
        Server.pid_files behavior here as well
"""
pid_files = (
('proxy-server.pid', 1),
('auth-server.pid', 'blah'),
('object-replicator/1.pid', 11),
('object-replicator/2.pid', 12),
)
files, contents = zip(*pid_files)
with temptree(files, contents) as t:
manager.RUN_DIR = t
server = manager.Server('proxy', run_dir=t)
# test get one file
iterator = server.iter_pid_files()
pid_file, pid = next(iterator)
self.assertEqual(pid_file, self.join_run_dir('proxy-server.pid'))
self.assertEqual(pid, 1)
# ... and only one file
self.assertRaises(StopIteration, next, iterator)
# test invalid value in pid file
server = manager.Server('auth', run_dir=t)
pid_file, pid = next(server.iter_pid_files())
self.assertIsNone(pid)
# test object-server doesn't steal pids from object-replicator
server = manager.Server('object', run_dir=t)
self.assertRaises(StopIteration, next, server.iter_pid_files())
# test multi-pid iter
server = manager.Server('object-replicator', run_dir=t)
real_map = {
11: self.join_run_dir('object-replicator/1.pid'),
12: self.join_run_dir('object-replicator/2.pid'),
}
pid_map = {}
for pid_file, pid in server.iter_pid_files():
pid_map[pid] = pid_file
self.assertEqual(pid_map, real_map)
# test get pid_files by number
conf_files = (
'object-server/1.conf',
'object-server/2.conf',
'object-server/3.conf',
'object-server/4.conf',
)
pid_files = (
('object-server/1.pid', 1),
('object-server/2.pid', 2),
('object-server/5.pid', 5),
)
with temptree(conf_files) as swift_dir:
manager.SWIFT_DIR = swift_dir
files, pids = zip(*pid_files)
with temptree(files, pids) as t:
manager.RUN_DIR = t
server = manager.Server('object', run_dir=t)
# test get all pid files
real_map = {
1: self.join_run_dir('object-server/1.pid'),
2: self.join_run_dir('object-server/2.pid'),
5: self.join_run_dir('object-server/5.pid'),
}
pid_map = {}
for pid_file, pid in server.iter_pid_files():
pid_map[pid] = pid_file
self.assertEqual(pid_map, real_map)
# test get pid with matching conf
pids = list(server.iter_pid_files(number=2))
self.assertEqual(len(pids), 1)
pid_file, pid = pids[0]
self.assertEqual(pid, 2)
pid_two = self.join_run_dir('object-server/2.pid')
self.assertEqual(pid_file, pid_two)
# try to iter on a pid number with a matching conf but no pid
pids = list(server.iter_pid_files(number=3))
self.assertFalse(pids)
# test get pids w/o matching conf
pids = list(server.iter_pid_files(number=5))
self.assertFalse(pids)
# test get pid_files by conf name
conf_files = (
'object-server/1.conf',
'object-server/2.conf',
'object-server/3.conf',
'object-server/4.conf',
)
pid_files = (
('object-server/1.pid', 1),
('object-server/2.pid', 2),
('object-server/5.pid', 5),
)
with temptree(conf_files) as swift_dir:
manager.SWIFT_DIR = swift_dir
files, pids = zip(*pid_files)
with temptree(files, pids) as t:
manager.RUN_DIR = t
server = manager.Server('object.2', run_dir=t)
# test get pid with matching conf
pids = list(server.iter_pid_files())
self.assertEqual(len(pids), 1)
pid_file, pid = pids[0]
self.assertEqual(pid, 2)
pid_two = self.join_run_dir('object-server/2.pid')
self.assertEqual(pid_file, pid_two)
def test_signal_pids(self):
temp_files = (
('var/run/zero-server.pid', 0),
('var/run/proxy-server.pid', 1),
('var/run/auth-server.pid', 2),
('var/run/one-server.pid', 3),
('var/run/object-server.pid', 4),
('var/run/invalid-server.pid', 'Forty-Two'),
('proc/3/cmdline', 'swift-another-server')
)
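        # Fixture notes: each pid file's content is the pid to signal;
        # zero-server (0) and invalid-server ('Forty-Two') hold unusable
        # pids, and proc/3/cmdline names a different command than
        # one-server expects, so those files get cleaned up further down.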
with temptree(*zip(*temp_files)) as t:
manager.RUN_DIR = os.path.join(t, 'var/run')
manager.PROC_DIR = os.path.join(t, 'proc')
            # mock os so that both the first and second pids are running
manager.os = MockOs([1, 2])
server = manager.Server('proxy', run_dir=manager.RUN_DIR)
pids = server.signal_pids(DUMMY_SIG)
self.assertEqual(len(pids), 1)
self.assertIn(1, pids)
self.assertEqual(manager.os.pid_sigs[1], [DUMMY_SIG])
# make sure other process not signaled
self.assertNotIn(2, pids)
self.assertNotIn(2, manager.os.pid_sigs)
# capture stdio
old_stdout = sys.stdout
try:
with open(os.path.join(t, 'output'), 'w+') as f:
sys.stdout = f
# test print details
pids = server.signal_pids(DUMMY_SIG)
output = pop_stream(f)
self.assertIn('pid: %s' % 1, output)
self.assertIn('signal: %s' % DUMMY_SIG, output)
# test no details on signal.SIG_DFL
pids = server.signal_pids(signal.SIG_DFL)
self.assertEqual(pop_stream(f), '')
# reset mock os so only the second server is running
manager.os = MockOs([2])
# test pid not running
pids = server.signal_pids(signal.SIG_DFL)
self.assertNotIn(1, pids)
self.assertNotIn(1, manager.os.pid_sigs)
# test remove stale pid file
self.assertFalse(os.path.exists(
self.join_run_dir('proxy-server.pid')))
# reset mock os with no running pids
manager.os = MockOs([])
server = manager.Server('auth', run_dir=manager.RUN_DIR)
# test verbose warns on removing stale pid file
pids = server.signal_pids(signal.SIG_DFL, verbose=True)
output = pop_stream(f)
self.assertTrue('stale pid' in output.lower())
auth_pid = self.join_run_dir('auth-server.pid')
self.assertTrue(auth_pid in output)
# reset mock os so only the third server is running
manager.os = MockOs([3])
server = manager.Server('one', run_dir=manager.RUN_DIR)
# test verbose warns on removing invalid pid file
pids = server.signal_pids(signal.SIG_DFL, verbose=True)
output = pop_stream(f)
old_stdout.write('output %s' % output)
self.assertTrue('removing pid file' in output.lower())
one_pid = self.join_run_dir('one-server.pid')
self.assertTrue(one_pid in output)
server = manager.Server('zero', run_dir=manager.RUN_DIR)
self.assertTrue(os.path.exists(
self.join_run_dir('zero-server.pid'))) # sanity
# test verbose warns on removing pid file with invalid pid
pids = server.signal_pids(signal.SIG_DFL, verbose=True)
output = pop_stream(f)
old_stdout.write('output %s' % output)
self.assertTrue('with invalid pid' in output.lower())
self.assertFalse(os.path.exists(
self.join_run_dir('zero-server.pid')))
server = manager.Server('invalid-server',
run_dir=manager.RUN_DIR)
self.assertTrue(os.path.exists(
self.join_run_dir('invalid-server.pid'))) # sanity
# test verbose warns on removing pid file with invalid pid
pids = server.signal_pids(signal.SIG_DFL, verbose=True)
output = pop_stream(f)
old_stdout.write('output %s' % output)
self.assertTrue('with invalid pid' in output.lower())
self.assertFalse(os.path.exists(
self.join_run_dir('invalid-server.pid')))
# reset mock os with no running pids
manager.os = MockOs([])
# test warning with insufficient permissions
server = manager.Server('object', run_dir=manager.RUN_DIR)
pids = server.signal_pids(manager.os.RAISE_EPERM_SIG)
output = pop_stream(f)
self.assertTrue('no permission to signal pid 4' in
output.lower(), output)
finally:
sys.stdout = old_stdout
def test_get_running_pids(self):
# test only gets running pids
temp_files = (
('var/run/test-server1.pid', 1),
('var/run/test-server2.pid', 2),
('var/run/test-server3.pid', 3),
('proc/1/cmdline', 'swift-test-server'),
('proc/3/cmdline', 'swift-another-server')
)
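        # Only pids that are both running in MockOs and whose cmdline under
        # PROC_DIR matches this server should be reported as running; the
        # rest have their pid files cleaned up, as checked below.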
with temptree(*zip(*temp_files)) as t:
manager.RUN_DIR = os.path.join(t, 'var/run')
manager.PROC_DIR = os.path.join(t, 'proc')
server = manager.Server(
'test-server', run_dir=manager.RUN_DIR)
            # mock os: pids 1 and 3 are running, but only pid 1
            # belongs to this server (see the cmdline entries above)
manager.os = MockOs([1, 3])
running_pids = server.get_running_pids()
self.assertEqual(len(running_pids), 1)
self.assertIn(1, running_pids)
self.assertNotIn(2, running_pids)
self.assertNotIn(3, running_pids)
# test persistent running pid files
self.assertTrue(os.path.exists(
os.path.join(manager.RUN_DIR, 'test-server1.pid')))
# test clean up stale pids
            pid_two = self.join_run_dir('test-server2.pid')
            self.assertFalse(os.path.exists(pid_two))
            pid_three = self.join_run_dir('test-server3.pid')
self.assertFalse(os.path.exists(pid_three))
# reset mock os, no pids running
manager.os = MockOs([])
running_pids = server.get_running_pids()
self.assertFalse(running_pids)
# and now all pid files are cleaned out
pid_one = self.join_run_dir('test-server1.pid')
self.assertFalse(os.path.exists(pid_one))
all_pids = os.listdir(manager.RUN_DIR)
self.assertEqual(len(all_pids), 0)
# test only get pids for right server
pid_files = (
('thing-doer.pid', 1),
('thing-sayer.pid', 2),
('other-doer.pid', 3),
('other-sayer.pid', 4),
)
files, pids = zip(*pid_files)
with temptree(files, pids) as t:
manager.RUN_DIR = t
# all pids are running
manager.os = MockOs(pids)
server = manager.Server('thing-doer', run_dir=t)
running_pids = server.get_running_pids()
# only thing-doer.pid, 1
self.assertEqual(len(running_pids), 1)
self.assertIn(1, running_pids)
# no other pids returned
for n in (2, 3, 4):
self.assertNotIn(n, running_pids)
# assert stale pids for other servers ignored
manager.os = MockOs([1]) # only thing-doer is running
running_pids = server.get_running_pids()
for f in ('thing-sayer.pid', 'other-doer.pid', 'other-sayer.pid'):
# other server pid files persist
self.assertTrue(os.path.exists(os.path.join(t, f)))
# verify that servers are in fact not running
for server_name in ('thing-sayer', 'other-doer', 'other-sayer'):
server = manager.Server(server_name, run_dir=t)
running_pids = server.get_running_pids()
self.assertFalse(running_pids)
# and now all OTHER pid files are cleaned out
all_pids = os.listdir(t)
self.assertEqual(len(all_pids), 1)
self.assertTrue(os.path.exists(os.path.join(t, 'thing-doer.pid')))
def test_kill_running_pids(self):
pid_files = (
('object-server.pid', 1),
('object-replicator1.pid', 11),
('object-replicator2.pid', 12),
)
files, running_pids = zip(*pid_files)
with temptree(files, running_pids) as t:
manager.RUN_DIR = t
server = manager.Server('object', run_dir=t)
# test no servers running
manager.os = MockOs([])
pids = server.kill_running_pids()
self.assertFalse(pids, pids)
files, running_pids = zip(*pid_files)
with temptree(files, running_pids) as t:
manager.RUN_DIR = t
server.run_dir = t
# start up pid
manager.os = MockOs([1])
server = manager.Server('object', run_dir=t)
# test kill one pid
pids = server.kill_running_pids()
self.assertEqual(len(pids), 1)
self.assertIn(1, pids)
self.assertEqual(manager.os.pid_sigs[1], [signal.SIGTERM])
# reset os mock
manager.os = MockOs([1])
# test shutdown
self.assertTrue('object-server' in
manager.GRACEFUL_SHUTDOWN_SERVERS)
pids = server.kill_running_pids(graceful=True)
self.assertEqual(len(pids), 1)
self.assertIn(1, pids)
self.assertEqual(manager.os.pid_sigs[1], [signal.SIGHUP])
# start up other servers
manager.os = MockOs([11, 12])
# test multi server kill & ignore graceful on unsupported server
self.assertFalse('object-replicator' in
manager.GRACEFUL_SHUTDOWN_SERVERS)
server = manager.Server('object-replicator', run_dir=t)
pids = server.kill_running_pids(graceful=True)
self.assertEqual(len(pids), 2)
for pid in (11, 12):
self.assertTrue(pid in pids)
self.assertEqual(manager.os.pid_sigs[pid],
[signal.SIGTERM])
# and the other pid is of course not signaled
self.assertNotIn(1, manager.os.pid_sigs)
def test_status(self):
conf_files = (
'test-server/1.conf',
'test-server/2.conf',
'test-server/3.conf',
'test-server/4.conf',
)
pid_files = (
('test-server/1.pid', 1),
('test-server/2.pid', 2),
('test-server/3.pid', 3),
('test-server/4.pid', 4),
)
with temptree(conf_files) as swift_dir:
manager.SWIFT_DIR = swift_dir
files, pids = zip(*pid_files)
with temptree(files, pids) as t:
manager.RUN_DIR = t
# setup running servers
server = manager.Server('test', run_dir=t)
# capture stdio
old_stdout = sys.stdout
try:
with open(os.path.join(t, 'output'), 'w+') as f:
sys.stdout = f
# test status for all running
manager.os = MockOs(pids)
proc_files = (
('1/cmdline', 'swift-test-server'),
('2/cmdline', 'swift-test-server'),
('3/cmdline', 'swift-test-server'),
('4/cmdline', 'swift-test-server'),
)
files, contents = zip(*proc_files)
with temptree(files, contents) as t:
manager.PROC_DIR = t
self.assertEqual(server.status(), 0)
output = pop_stream(f).strip().splitlines()
self.assertEqual(len(output), 4)
for line in output:
self.assertTrue('test-server running' in line)
# test get single server by number
with temptree([], []) as t:
manager.PROC_DIR = t
self.assertEqual(server.status(number=4), 0)
output = pop_stream(f).strip().splitlines()
self.assertEqual(len(output), 1)
line = output[0]
self.assertTrue('test-server running' in line)
conf_four = self.join_swift_dir(conf_files[3])
self.assertTrue('4 - %s' % conf_four in line)
# test some servers not running
manager.os = MockOs([1, 2, 3])
proc_files = (
('1/cmdline', 'swift-test-server'),
('2/cmdline', 'swift-test-server'),
('3/cmdline', 'swift-test-server'),
)
files, contents = zip(*proc_files)
with temptree(files, contents) as t:
manager.PROC_DIR = t
self.assertEqual(server.status(), 0)
output = pop_stream(f).strip().splitlines()
self.assertEqual(len(output), 3)
for line in output:
self.assertTrue('test-server running' in line)
# test single server not running
manager.os = MockOs([1, 2])
proc_files = (
('1/cmdline', 'swift-test-server'),
('2/cmdline', 'swift-test-server'),
)
files, contents = zip(*proc_files)
with temptree(files, contents) as t:
manager.PROC_DIR = t
self.assertEqual(server.status(number=3), 1)
output = pop_stream(f).strip().splitlines()
self.assertEqual(len(output), 1)
line = output[0]
self.assertTrue('not running' in line)
conf_three = self.join_swift_dir(conf_files[2])
self.assertTrue(conf_three in line)
# test no running pids
manager.os = MockOs([])
with temptree([], []) as t:
manager.PROC_DIR = t
self.assertEqual(server.status(), 1)
output = pop_stream(f).lower()
self.assertTrue('no test-server running' in output)
# test use provided pids
pids = {
1: '1.pid',
2: '2.pid',
}
# shouldn't call get_running_pids
called = []
def mock(*args, **kwargs):
called.append(True)
server.get_running_pids = mock
status = server.status(pids=pids)
self.assertEqual(status, 0)
self.assertFalse(called)
output = pop_stream(f).strip().splitlines()
self.assertEqual(len(output), 2)
for line in output:
self.assertTrue('test-server running' in line)
finally:
sys.stdout = old_stdout
def test_spawn(self):
# mocks
class MockProcess(object):
NOTHING = 'default besides None'
STDOUT = 'stdout'
PIPE = 'pipe'
def __init__(self, pids=None):
if pids is None:
pids = []
self.pids = (p for p in pids)
def Popen(self, args, **kwargs):
return MockProc(next(self.pids), args, **kwargs)
class MockProc(object):
def __init__(self, pid, args, stdout=MockProcess.NOTHING,
stderr=MockProcess.NOTHING):
self.pid = pid
self.args = args
self.stdout = stdout
if stderr == MockProcess.STDOUT:
self.stderr = self.stdout
else:
self.stderr = stderr
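        # These stand-ins replace the subprocess module for the test:
        # MockProcess plays the module (Popen() hands out the preset pids)
        # and MockProc records the args/stdout/stderr that Server.spawn
        # passes to Popen, so they can be asserted on below.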
# setup running servers
server = manager.Server('test')
with temptree(['test-server.conf']) as swift_dir:
manager.SWIFT_DIR = swift_dir
with temptree([]) as t:
manager.RUN_DIR = t
server.run_dir = t
old_subprocess = manager.subprocess
try:
# test single server process calls spawn once
manager.subprocess = MockProcess([1])
conf_file = self.join_swift_dir('test-server.conf')
# spawn server no kwargs
server.spawn(conf_file)
# test pid file
pid_file = self.join_run_dir('test-server.pid')
self.assertTrue(os.path.exists(pid_file))
pid_on_disk = int(open(pid_file).read().strip())
self.assertEqual(pid_on_disk, 1)
# assert procs args
self.assertTrue(server.procs)
self.assertEqual(len(server.procs), 1)
proc = server.procs[0]
expected_args = [
'swift-test-server',
conf_file,
]
self.assertEqual(proc.args, expected_args)
# assert stdout is piped
self.assertEqual(proc.stdout, MockProcess.PIPE)
self.assertEqual(proc.stderr, proc.stdout)
# test multi server process calls spawn multiple times
manager.subprocess = MockProcess([11, 12, 13, 14])
conf1 = self.join_swift_dir('test-server/1.conf')
conf2 = self.join_swift_dir('test-server/2.conf')
conf3 = self.join_swift_dir('test-server/3.conf')
conf4 = self.join_swift_dir('test-server/4.conf')
server = manager.Server('test', run_dir=t)
# test server run once
server.spawn(conf1, once=True)
self.assertTrue(server.procs)
self.assertEqual(len(server.procs), 1)
proc = server.procs[0]
                    expected_args = ['swift-test-server', conf1, 'once']
                    self.assertEqual(proc.args, expected_args)
# assert stdout is piped
self.assertEqual(proc.stdout, MockProcess.PIPE)
self.assertEqual(proc.stderr, proc.stdout)
# test server not daemon
server.spawn(conf2, daemon=False)
self.assertTrue(server.procs)
self.assertEqual(len(server.procs), 2)
proc = server.procs[1]
expected_args = ['swift-test-server', conf2, 'verbose']
self.assertEqual(proc.args, expected_args)
# assert stdout is not changed
self.assertIsNone(proc.stdout)
self.assertIsNone(proc.stderr)
                    # test server wait=False (output goes to /dev/null)
server.spawn(conf3, wait=False)
self.assertTrue(server.procs)
self.assertEqual(len(server.procs), 3)
proc = server.procs[2]
# assert stdout is /dev/null
with open('/dev/null', 'wb+') as fp:
self.assertTrue(isinstance(proc.stdout, type(fp)))
self.assertEqual(proc.stdout.name, os.devnull)
self.assertIn('b', proc.stdout.mode)
self.assertTrue(any(x in proc.stdout.mode for x in 'aw+'),
'mode must be writable, not %r' %
proc.stdout.mode)
self.assertEqual(proc.stderr, proc.stdout)
# test not daemon over-rides wait
server.spawn(conf4, wait=False, daemon=False, once=True)
self.assertTrue(server.procs)
self.assertEqual(len(server.procs), 4)
proc = server.procs[3]
expected_args = ['swift-test-server', conf4, 'once',
'verbose']
self.assertEqual(proc.args, expected_args)
# daemon behavior should trump wait, once shouldn't matter
self.assertIsNone(proc.stdout)
self.assertIsNone(proc.stderr)
# assert pids
for i, proc in enumerate(server.procs):
pid_file = self.join_run_dir('test-server/%d.pid' %
(i + 1))
pid_on_disk = int(open(pid_file).read().strip())
self.assertEqual(pid_on_disk, proc.pid)
finally:
manager.subprocess = old_subprocess
def test_wait(self):
server = manager.Server('test')
self.assertEqual(server.wait(), 0)
class MockProcess(threading.Thread):
def __init__(self, delay=0.1, fail_to_start=False):
threading.Thread.__init__(self)
# setup pipe
rfd, wfd = os.pipe()
# subprocess connection to read stdout
self.stdout = os.fdopen(rfd, 'rb')
# real process connection to write stdout
self._stdout = os.fdopen(wfd, 'wb')
self.delay = delay
self.finished = False
self.returncode = None
if fail_to_start:
self._returncode = 1
self.run = self.fail
else:
self._returncode = 0
def __enter__(self):
self.start()
return self
def __exit__(self, *args):
if self.is_alive():
self.join()
def close_stdout(self):
self._stdout.flush()
with open(os.devnull, 'wb') as nullfile:
try:
os.dup2(nullfile.fileno(), self._stdout.fileno())
except OSError:
pass
def fail(self):
self._stdout.write(b'mock process started\n')
sleep(self.delay) # perform setup processing
self._stdout.write(b'mock process failed to start\n')
self.close_stdout()
def poll(self):
self.returncode = self._returncode
return self.returncode or None
def run(self):
self._stdout.write(b'mock process started\n')
sleep(self.delay) # perform setup processing
self._stdout.write(b'setup complete!\n')
self.close_stdout()
sleep(self.delay) # do some more processing
self._stdout.write(b'mock process finished\n')
self.finished = True
class MockTime(object):
def time(self):
return time()
def sleep(self, *args, **kwargs):
pass
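        # MockProcess fakes a child with a real pipe: the test reads the
        # subprocess end while the thread writes to the other end and then
        # points its write fd at /dev/null, so the reader sees EOF and
        # Server.wait() can return before the "process" finishes. MockTime
        # turns manager's sleeps into no-ops.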
with temptree([]) as t, open(os.path.join(t, 'output'), 'w+') as f, \
mock.patch.object(sys, 'stdout', f), \
mock.patch.object(manager, 'WARNING_WAIT', 0.01), \
mock.patch.object(manager, 'time', MockTime()):
            # Note: sys.stdout is redirected to f so prints can be checked
# test closing pipe in subprocess unblocks read
with MockProcess() as proc:
server.procs = [proc]
status = server.wait()
self.assertEqual(status, 0)
# wait should return before process exits
self.assertTrue(proc.is_alive())
self.assertFalse(proc.finished)
self.assertTrue(proc.finished) # make sure it did finish
# test output kwarg prints subprocess output
with MockProcess() as proc:
server.procs = [proc]
status = server.wait(output=True)
output = pop_stream(f)
self.assertIn('mock process started', output)
self.assertIn('setup complete', output)
# make sure we don't get prints after stdout was closed
self.assertNotIn('mock process finished', output)
# test process which fails to start
with MockProcess(fail_to_start=True) as proc:
server.procs = [proc]
status = server.wait()
self.assertEqual(status, 1)
self.assertIn('failed', pop_stream(f))
# test multiple procs
procs = [MockProcess(delay=.5) for i in range(3)]
for proc in procs:
proc.start()
server.procs = procs
status = server.wait()
self.assertEqual(status, 0)
for proc in procs:
self.assertTrue(proc.is_alive())
for proc in procs:
proc.join()
def test_interact(self):
class MockProcess(object):
def __init__(self, fail=False):
self.returncode = None
if fail:
self._returncode = 1
else:
self._returncode = 0
def communicate(self):
self.returncode = self._returncode
return '', ''
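        # interact() is expected to communicate() with each proc and fold
        # the return codes together, so one failing proc is enough to make
        # the overall status non-zero (asserted below).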
server = manager.Server('test')
server.procs = [MockProcess()]
self.assertEqual(server.interact(), 0)
server.procs = [MockProcess(fail=True)]
self.assertEqual(server.interact(), 1)
procs = []
for fail in (False, True, True):
procs.append(MockProcess(fail=fail))
server.procs = procs
self.assertTrue(server.interact() > 0)
def test_launch(self):
# stubs
conf_files = (
'proxy-server.conf',
'auth-server.conf',
'object-server/1.conf',
'object-server/2.conf',
'object-server/3.conf',
'object-server/4.conf',
)
pid_files = (
('proxy-server.pid', 1),
('proxy-server/2.pid', 2),
)
# mocks
class MockSpawn(object):
def __init__(self, pids=None):
self.conf_files = []
self.kwargs = []
if not pids:
def one_forever():
while True:
yield 1
self.pids = one_forever()
else:
self.pids = (x for x in pids)
def __call__(self, conf_file, **kwargs):
self.conf_files.append(conf_file)
self.kwargs.append(kwargs)
rv = next(self.pids)
if isinstance(rv, Exception):
raise rv
else:
return rv
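        # MockSpawn substitutes for Server.spawn: it records each conf_file
        # and kwargs and returns the next canned pid, or raises it when the
        # canned value is an exception (used below to simulate a missing
        # swift-auth-server executable).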
with temptree(conf_files) as swift_dir:
manager.SWIFT_DIR = swift_dir
files, pids = zip(*pid_files)
with temptree(files, pids) as t:
manager.RUN_DIR = t
old_stdout = sys.stdout
try:
with open(os.path.join(t, 'output'), 'w+') as f:
sys.stdout = f
                        # can't start server w/o a conf
server = manager.Server('test', run_dir=t)
self.assertFalse(server.launch())
# start mock os running all pids
manager.os = MockOs(pids)
proc_files = (
('1/cmdline', 'swift-proxy-server'),
('2/cmdline', 'swift-proxy-server'),
)
files, contents = zip(*proc_files)
with temptree(files, contents) as proc_dir:
manager.PROC_DIR = proc_dir
server = manager.Server('proxy', run_dir=t)
# can't start server if it's already running
self.assertFalse(server.launch())
output = pop_stream(f)
self.assertTrue('running' in output)
conf_file = self.join_swift_dir(
'proxy-server.conf')
self.assertTrue(conf_file in output)
pid_file = self.join_run_dir('proxy-server/2.pid')
self.assertTrue(pid_file in output)
self.assertTrue('already started' in output)
# no running pids
manager.os = MockOs([])
with temptree([], []) as proc_dir:
manager.PROC_DIR = proc_dir
# test ignore once for non-start-once server
mock_spawn = MockSpawn([1])
server.spawn = mock_spawn
conf_file = self.join_swift_dir(
'proxy-server.conf')
expected = {
1: conf_file,
}
self.assertEqual(server.launch(once=True),
expected)
self.assertEqual(mock_spawn.conf_files,
[conf_file])
expected = {
'once': False,
}
self.assertEqual(mock_spawn.kwargs, [expected])
output = pop_stream(f)
self.assertIn('Starting', output)
self.assertNotIn('once', output)
# test multi-server kwarg once
server = manager.Server('object-replicator')
with temptree([], []) as proc_dir:
manager.PROC_DIR = proc_dir
mock_spawn = MockSpawn([1, 2, 3, 4])
server.spawn = mock_spawn
conf1 = self.join_swift_dir('object-server/1.conf')
conf2 = self.join_swift_dir('object-server/2.conf')
conf3 = self.join_swift_dir('object-server/3.conf')
conf4 = self.join_swift_dir('object-server/4.conf')
expected = {
1: conf1,
2: conf2,
3: conf3,
4: conf4,
}
self.assertEqual(server.launch(once=True),
expected)
self.assertEqual(mock_spawn.conf_files, [
conf1, conf2, conf3, conf4])
expected = {
'once': True,
}
self.assertEqual(len(mock_spawn.kwargs), 4)
for kwargs in mock_spawn.kwargs:
self.assertEqual(kwargs, expected)
# test number kwarg
mock_spawn = MockSpawn([4])
manager.PROC_DIR = proc_dir
server.spawn = mock_spawn
expected = {
4: conf4,
}
self.assertEqual(server.launch(number=4),
expected)
self.assertEqual(mock_spawn.conf_files, [conf4])
expected = {
'number': 4
}
self.assertEqual(mock_spawn.kwargs, [expected])
# test cmd does not exist
server = manager.Server('auth')
with temptree([], []) as proc_dir:
manager.PROC_DIR = proc_dir
mock_spawn = MockSpawn([OSError(errno.ENOENT,
'blah')])
server.spawn = mock_spawn
self.assertEqual(server.launch(), {})
self.assertTrue(
'swift-auth-server does not exist' in
pop_stream(f))
finally:
sys.stdout = old_stdout
def test_stop(self):
conf_files = (
'account-server/1.conf',
'account-server/2.conf',
'account-server/3.conf',
'account-server/4.conf',
)
pid_files = (
('account-reaper/1.pid', 1),
('account-reaper/2.pid', 2),
('account-reaper/3.pid', 3),
('account-reaper/4.pid', 4),
)
with temptree(conf_files) as swift_dir:
manager.SWIFT_DIR = swift_dir
files, pids = zip(*pid_files)
with temptree(files, pids) as t:
manager.RUN_DIR = t
# start all pids in mock os
manager.os = MockOs(pids)
server = manager.Server('account-reaper', run_dir=t)
# test kill all running pids
pids = server.stop()
self.assertEqual(len(pids), 4)
for pid in (1, 2, 3, 4):
self.assertTrue(pid in pids)
self.assertEqual(manager.os.pid_sigs[pid],
[signal.SIGTERM])
conf1 = self.join_swift_dir('account-reaper/1.conf')
conf2 = self.join_swift_dir('account-reaper/2.conf')
conf3 = self.join_swift_dir('account-reaper/3.conf')
conf4 = self.join_swift_dir('account-reaper/4.conf')
# reset mock os with only 2 running pids
manager.os = MockOs([3, 4])
pids = server.stop()
self.assertEqual(len(pids), 2)
for pid in (3, 4):
self.assertTrue(pid in pids)
self.assertEqual(manager.os.pid_sigs[pid],
[signal.SIGTERM])
self.assertFalse(os.path.exists(conf1))
self.assertFalse(os.path.exists(conf2))
# test number kwarg
manager.os = MockOs([3, 4])
pids = server.stop(number=3)
self.assertEqual(len(pids), 1)
expected = {
3: self.join_run_dir('account-reaper/3.pid'),
}
self.assertEqual(expected, pids)
self.assertEqual(manager.os.pid_sigs[3], [signal.SIGTERM])
self.assertFalse(os.path.exists(conf4))
self.assertFalse(os.path.exists(conf3))
class TestManager(unittest.TestCase):
@mock.patch.object(manager, 'verify_server',
side_effect=lambda server: 'error' not in server)
def test_create(self, mock_verify):
m = manager.Manager(['test'])
self.assertEqual(len(m.servers), 1)
server = m.servers.pop()
self.assertTrue(isinstance(server, manager.Server))
self.assertEqual(server.server, 'test-server')
# test multi-server and simple dedupe
servers = ['object-replicator', 'object-auditor',
'object-replicator']
m = manager.Manager(servers)
self.assertEqual(len(m.servers), 2)
for server in m.servers:
self.assertTrue(server.server in servers)
# test all
m = manager.Manager(['all'])
self.assertEqual(len(m.servers), len(manager.ALL_SERVERS))
for server in m.servers:
self.assertTrue(server.server in manager.ALL_SERVERS)
# test main
m = manager.Manager(['main'])
self.assertEqual(len(m.servers), len(manager.MAIN_SERVERS))
for server in m.servers:
self.assertTrue(server.server in manager.MAIN_SERVERS)
# test rest
m = manager.Manager(['rest'])
self.assertEqual(len(m.servers), len(manager.REST_SERVERS))
for server in m.servers:
self.assertTrue(server.server in manager.REST_SERVERS)
# test main + rest == all
m = manager.Manager(['main', 'rest'])
self.assertEqual(len(m.servers), len(manager.ALL_SERVERS))
for server in m.servers:
self.assertTrue(server.server in manager.ALL_SERVERS)
# test dedupe
m = manager.Manager(['main', 'rest', 'proxy', 'object',
'container', 'account'])
self.assertEqual(len(m.servers), len(manager.ALL_SERVERS))
for server in m.servers:
self.assertTrue(server.server in manager.ALL_SERVERS)
# test glob
m = manager.Manager(['object-*'])
object_servers = [s for s in manager.ALL_SERVERS if
s.startswith('object')]
self.assertEqual(len(m.servers), len(object_servers))
for s in m.servers:
self.assertTrue(str(s) in object_servers)
m = manager.Manager(['*-replicator'])
replicators = [s for s in manager.ALL_SERVERS if
s.endswith('replicator')]
for s in m.servers:
self.assertTrue(str(s) in replicators)
# test invalid server
m = manager.Manager(['error'])
self.assertEqual(len(m.servers), 0)
# test valid + invalid server
servers = ['object-server']
m = manager.Manager(['object', 'error'])
self.assertEqual(len(m.servers), 1)
for server in m.servers:
self.assertTrue(server.server in servers)
# test multi-server and invalid server together
servers = ['object-replicator', 'object-auditor', 'error']
m = manager.Manager(servers)
self.assertEqual(len(m.servers), 2)
for server in m.servers:
self.assertTrue(server.server in servers[:2])
def test_iter(self):
with mock.patch.object(manager, 'find_executable', lambda x: x):
m = manager.Manager(['all'])
self.assertEqual(len(list(m)), len(manager.ALL_SERVERS))
for server in m:
self.assertTrue(server.server in manager.ALL_SERVERS)
def test_default_strict(self):
# test default strict
m = manager.Manager(['proxy'])
self.assertEqual(m._default_strict, True)
# aliases
m = manager.Manager(['main'])
self.assertEqual(m._default_strict, False)
m = manager.Manager(['proxy*'])
self.assertEqual(m._default_strict, False)
def test_status(self):
class MockServer(object):
def __init__(self, server, run_dir=manager.RUN_DIR):
self.server = server
self.called_kwargs = []
def status(self, **kwargs):
self.called_kwargs.append(kwargs)
if 'error' in self.server:
return 1
else:
return 0
def mock_verify_server(server):
if 'error' in server:
return False
return True
old_verify_server = manager.verify_server
old_server_class = manager.Server
try:
manager.verify_server = mock_verify_server
manager.Server = MockServer
m = manager.Manager(['test'])
status = m.status()
self.assertEqual(status, 0)
m = manager.Manager(['error'])
status = m.status()
self.assertEqual(status, 1)
# test multi-server
m = manager.Manager(['test', 'error'])
kwargs = {'key': 'value'}
status = m.status(**kwargs)
self.assertEqual(status, 0)
for server in m.servers:
self.assertEqual(server.called_kwargs, [kwargs])
finally:
manager.verify_server = old_verify_server
manager.Server = old_server_class
def test_start(self):
        def mock_setup_env():
            calls = getattr(mock_setup_env, 'called', [])
            calls.append(True)
            mock_setup_env.called = calls
def mock_verify_server(server):
if 'none' in server:
return False
return True
class MockServer(object):
def __init__(self, server, run_dir=manager.RUN_DIR):
self.server = server
self.called = defaultdict(list)
def launch(self, **kwargs):
self.called['launch'].append(kwargs)
if 'noconfig' in self.server:
return {}
elif 'somerunning' in self.server:
return {}
else:
return {1: self.server[0]}
def wait(self, **kwargs):
self.called['wait'].append(kwargs)
return int('error' in self.server)
def stop(self, **kwargs):
self.called['stop'].append(kwargs)
def interact(self, **kwargs):
self.called['interact'].append(kwargs)
if 'raise' in self.server:
raise KeyboardInterrupt
elif 'error' in self.server:
return 1
else:
return 0
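        # Server names double as behavior switches in these mocks: 'none'
        # fails verify_server, 'noconfig' and 'somerunning' make launch()
        # return an empty dict, 'error' makes wait()/interact() return 1,
        # and 'raise' raises KeyboardInterrupt from interact().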
old_setup_env = manager.setup_env
old_verify_server = manager.verify_server
old_swift_server = manager.Server
try:
manager.setup_env = mock_setup_env
manager.verify_server = mock_verify_server
manager.Server = MockServer
# test no errors on launch
m = manager.Manager(['proxy'])
status = m.start()
self.assertEqual(status, 0)
for server in m.servers:
self.assertEqual(server.called['launch'], [{}])
# test error on launch
m = manager.Manager(['proxy', 'error'])
status = m.start()
self.assertEqual(status, 1)
for server in m.servers:
self.assertEqual(server.called['launch'], [{}])
self.assertEqual(server.called['wait'], [{}])
# test missing (on launch, as it happens)
# We only throw a bad error code if nothing good was run.
m = manager.Manager(['none'])
status = m.start()
self.assertEqual(status, 1)
m = manager.Manager(['proxy', 'none'])
status = m.start()
self.assertEqual(status, 0)
# test interact
m = manager.Manager(['proxy', 'error'])
kwargs = {'daemon': False}
status = m.start(**kwargs)
self.assertEqual(status, 1)
for server in m.servers:
self.assertEqual(server.called['launch'], [kwargs])
self.assertEqual(server.called['interact'], [kwargs])
m = manager.Manager(['raise'])
kwargs = {'daemon': False}
status = m.start(**kwargs)
# test no config
m = manager.Manager(['proxy', 'noconfig'])
status = m.start()
self.assertEqual(status, 1)
for server in m.servers:
self.assertEqual(server.called['launch'], [{}])
self.assertEqual(server.called['wait'], [{}])
# test no config with --non-strict
m = manager.Manager(['proxy', 'noconfig'])
status = m.start(strict=False)
self.assertEqual(status, 0)
for server in m.servers:
self.assertEqual(server.called['launch'], [{'strict': False}])
self.assertEqual(server.called['wait'], [{'strict': False}])
# test no config --strict
m = manager.Manager(['proxy', 'noconfig'])
status = m.start(strict=True)
self.assertEqual(status, 1)
for server in m.servers:
self.assertEqual(server.called['launch'], [{'strict': True}])
self.assertEqual(server.called['wait'], [{'strict': True}])
# test no config with alias
m = manager.Manager(['main', 'noconfig'])
status = m.start()
self.assertEqual(status, 0)
for server in m.servers:
self.assertEqual(server.called['launch'], [{}])
self.assertEqual(server.called['wait'], [{}])
# test no config with alias and --non-strict
m = manager.Manager(['main', 'noconfig'])
status = m.start(strict=False)
self.assertEqual(status, 0)
for server in m.servers:
self.assertEqual(server.called['launch'], [{'strict': False}])
self.assertEqual(server.called['wait'], [{'strict': False}])
# test no config with alias and --strict
m = manager.Manager(['main', 'noconfig'])
status = m.start(strict=True)
self.assertEqual(status, 1)
for server in m.servers:
self.assertEqual(server.called['launch'], [{'strict': True}])
self.assertEqual(server.called['wait'], [{'strict': True}])
# test already all running
m = manager.Manager(['proxy', 'somerunning'])
status = m.start()
self.assertEqual(status, 1)
for server in m.servers:
self.assertEqual(server.called['launch'], [{}])
self.assertEqual(server.called['wait'], [{}])
# test already all running --non-strict
m = manager.Manager(['proxy', 'somerunning'])
status = m.start(strict=False)
self.assertEqual(status, 0)
for server in m.servers:
self.assertEqual(server.called['launch'], [{'strict': False}])
self.assertEqual(server.called['wait'], [{'strict': False}])
# test already all running --strict
m = manager.Manager(['proxy', 'somerunning'])
status = m.start(strict=True)
self.assertEqual(status, 1)
for server in m.servers:
self.assertEqual(server.called['launch'], [{'strict': True}])
self.assertEqual(server.called['wait'], [{'strict': True}])
# test already all running with alias
m = manager.Manager(['main', 'somerunning'])
status = m.start()
self.assertEqual(status, 0)
for server in m.servers:
self.assertEqual(server.called['launch'], [{}])
self.assertEqual(server.called['wait'], [{}])
# test already all running with alias and --non-strict
m = manager.Manager(['main', 'somerunning'])
status = m.start(strict=False)
self.assertEqual(status, 0)
for server in m.servers:
self.assertEqual(server.called['launch'], [{'strict': False}])
self.assertEqual(server.called['wait'], [{'strict': False}])
# test already all running with alias and --strict
m = manager.Manager(['main', 'somerunning'])
status = m.start(strict=True)
self.assertEqual(status, 1)
for server in m.servers:
self.assertEqual(server.called['launch'], [{'strict': True}])
self.assertEqual(server.called['wait'], [{'strict': True}])
finally:
manager.setup_env = old_setup_env
manager.verify_server = old_verify_server
manager.Server = old_swift_server
def test_no_wait(self):
def mock_verify_server(server):
if 'error' in server:
return False
return True
class MockServer(object):
def __init__(self, server, run_dir=manager.RUN_DIR):
self.server = server
self.called = defaultdict(list)
def launch(self, **kwargs):
self.called['launch'].append(kwargs)
# must return non-empty dict if launch succeeded
return {1: self.server[0]}
def wait(self, **kwargs):
self.called['wait'].append(kwargs)
return int('error' in self.server)
orig_verify_server = manager.verify_server
orig_swift_server = manager.Server
try:
manager.verify_server = mock_verify_server
manager.Server = MockServer
# test success
init = manager.Manager(['proxy'])
status = init.no_wait()
self.assertEqual(status, 0)
for server in init.servers:
self.assertEqual(len(server.called['launch']), 1)
called_kwargs = server.called['launch'][0]
self.assertFalse(called_kwargs['wait'])
self.assertFalse(server.called['wait'])
            # test no error code status even on an invalid server
init = manager.Manager(['invalid'])
status = init.no_wait()
self.assertEqual(status, 0)
for server in init.servers:
self.assertEqual(len(server.called['launch']), 1)
called_kwargs = server.called['launch'][0]
self.assertTrue('wait' in called_kwargs)
self.assertFalse(called_kwargs['wait'])
self.assertFalse(server.called['wait'])
            # test no_wait with the once option
init = manager.Manager(['updater', 'replicator-invalid'])
status = init.no_wait(once=True)
self.assertEqual(status, 0)
for server in init.servers:
self.assertEqual(len(server.called['launch']), 1)
called_kwargs = server.called['launch'][0]
self.assertTrue('wait' in called_kwargs)
self.assertFalse(called_kwargs['wait'])
self.assertTrue('once' in called_kwargs)
self.assertTrue(called_kwargs['once'])
self.assertFalse(server.called['wait'])
finally:
manager.verify_server = orig_verify_server
manager.Server = orig_swift_server
def test_no_daemon(self):
def mock_verify_server(server):
return True
class MockServer(object):
def __init__(self, server, run_dir=manager.RUN_DIR):
self.server = server
self.called = defaultdict(list)
def launch(self, **kwargs):
self.called['launch'].append(kwargs)
# must return non-empty dict if launch succeeded
return {1: self.server[0]}
def interact(self, **kwargs):
self.called['interact'].append(kwargs)
return int('error' in self.server)
orig_verify_server = manager.verify_server
orig_swift_server = manager.Server
try:
manager.Server = MockServer
manager.verify_server = mock_verify_server
# test success
init = manager.Manager(['proxy'])
stats = init.no_daemon()
self.assertEqual(stats, 0)
# test error
init = manager.Manager(['proxy', 'object-error'])
stats = init.no_daemon()
self.assertEqual(stats, 1)
# test once
init = manager.Manager(['proxy', 'object-error'])
stats = init.no_daemon()
for server in init.servers:
self.assertEqual(len(server.called['launch']), 1)
self.assertEqual(len(server.called['wait']), 0)
self.assertEqual(len(server.called['interact']), 1)
finally:
manager.verify_server = orig_verify_server
manager.Server = orig_swift_server
def test_once(self):
def mock_verify_server(server):
if 'error' in server:
return False
return True
class MockServer(object):
def __init__(self, server, run_dir=manager.RUN_DIR):
self.server = server
self.called = defaultdict(list)
def wait(self, **kwargs):
self.called['wait'].append(kwargs)
if 'error' in self.server:
return 1
else:
return 0
def launch(self, **kwargs):
self.called['launch'].append(kwargs)
return {1: 'account-reaper'}
orig_verify_server = manager.verify_server
orig_swift_server = manager.Server
try:
manager.Server = MockServer
manager.verify_server = mock_verify_server
# test no errors
init = manager.Manager(['account-reaper'])
status = init.once()
self.assertEqual(status, 0)
# test error code on error
init = manager.Manager(['error'])
status = init.once()
self.assertEqual(status, 1)
for server in init.servers:
self.assertEqual(len(server.called['launch']), 1)
called_kwargs = server.called['launch'][0]
self.assertEqual(called_kwargs, {'once': True})
self.assertEqual(len(server.called['wait']), 1)
self.assertEqual(len(server.called['interact']), 0)
finally:
manager.Server = orig_swift_server
manager.verify_server = orig_verify_server
def test_stop(self):
def mock_verify_server(server):
if 'error' in server:
return False
return True
class MockServerFactory(object):
class MockServer(object):
def __init__(self, pids, run_dir=manager.RUN_DIR):
self.pids = pids
def stop(self, **kwargs):
return self.pids
def status(self, **kwargs):
return not self.pids
def __init__(self, server_pids, run_dir=manager.RUN_DIR):
self.server_pids = server_pids
def __call__(self, server, run_dir=manager.RUN_DIR):
return MockServerFactory.MockServer(self.server_pids[server])
def mock_watch_server_pids(server_pids, **kwargs):
for server, pids in server_pids.items():
for pid in pids:
if pid is None:
continue
yield server, pid
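        # Mock scheme: each MockServer returns its canned pid dict from
        # stop(), and a pid of None marks a process that never dies; the
        # mocked watch_server_pids skips None, so Manager.stop() ends up
        # reporting failure here without kill_group ever being called
        # (kill_group is mocked to fail the test).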
def mock_kill_group(pid, sig):
self.fail('kill_group should not be called')
_orig_verify_server = manager.verify_server
_orig_server = manager.Server
_orig_watch_server_pids = manager.watch_server_pids
_orig_kill_group = manager.kill_group
try:
manager.watch_server_pids = mock_watch_server_pids
manager.kill_group = mock_kill_group
manager.verify_server = mock_verify_server
# test stop one server
server_pids = {
'test': {1: "dummy.pid"}
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
status = m.stop()
self.assertEqual(status, 0)
# test not running
server_pids = {
'test': {}
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
status = m.stop()
self.assertEqual(status, 1)
# test kill not running
server_pids = {
'test': {}
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
status = m.kill()
self.assertEqual(status, 0)
# test won't die
server_pids = {
'test': {None: None}
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
status = m.stop()
self.assertEqual(status, 1)
finally:
manager.verify_server = _orig_verify_server
manager.Server = _orig_server
manager.watch_server_pids = _orig_watch_server_pids
manager.kill_group = _orig_kill_group
def test_stop_kill_after_timeout(self):
class MockServerFactory(object):
class MockServer(object):
def __init__(self, pids, run_dir=manager.RUN_DIR):
self.pids = pids
def stop(self, **kwargs):
return self.pids
def status(self, **kwargs):
return not self.pids
def __init__(self, server_pids, run_dir=manager.RUN_DIR):
self.server_pids = server_pids
def __call__(self, server, run_dir=manager.RUN_DIR):
return MockServerFactory.MockServer(self.server_pids[server])
def mock_watch_server_pids(server_pids, **kwargs):
for server, pids in server_pids.items():
for pid in pids:
if pid is None:
continue
yield server, pid
mock_kill_group_called = []
def mock_kill_group(*args):
mock_kill_group_called.append(args)
def mock_kill_group_oserr(*args):
raise OSError()
def mock_kill_group_oserr_ESRCH(*args):
raise OSError(errno.ESRCH, 'No such process')
def mock_verify_server(server):
if 'error' in server:
return False
return True
_orig_server = manager.Server
_orig_watch_server_pids = manager.watch_server_pids
_orig_kill_group = manager.kill_group
_orig_verify_server = manager.verify_server
try:
manager.watch_server_pids = mock_watch_server_pids
manager.kill_group = mock_kill_group
manager.verify_server = mock_verify_server
# test stop one server
server_pids = {
'test': {None: None}
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
status = m.stop(kill_after_timeout=True)
self.assertEqual(status, 1)
self.assertEqual(mock_kill_group_called, [(None, 9)])
manager.kill_group = mock_kill_group_oserr
# test stop one server - OSError
server_pids = {
'test': {None: None}
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
with self.assertRaises(OSError):
status = m.stop(kill_after_timeout=True)
manager.kill_group = mock_kill_group_oserr_ESRCH
# test stop one server - OSError: No such process
server_pids = {
'test': {None: None}
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
status = m.stop(kill_after_timeout=True)
self.assertEqual(status, 1)
finally:
manager.Server = _orig_server
manager.watch_server_pids = _orig_watch_server_pids
manager.kill_group = _orig_kill_group
manager.verify_server = _orig_verify_server
@mock.patch.object(manager, 'verify_server',
side_effect=lambda server: 'error' not in server)
def test_shutdown(self, mock_verify):
m = manager.Manager(['test'])
m.stop_was_called = False
def mock_stop(*args, **kwargs):
m.stop_was_called = True
expected = {'graceful': True}
self.assertEqual(kwargs, expected)
return 0
m.stop = mock_stop
status = m.shutdown()
self.assertEqual(status, 0)
self.assertEqual(m.stop_was_called, True)
@mock.patch.object(manager, 'verify_server',
side_effect=lambda server: 'error' not in server)
def test_restart(self, mock_verify):
m = manager.Manager(['test'])
m.stop_was_called = False
def mock_stop(*args, **kwargs):
m.stop_was_called = True
return 0
m.start_was_called = False
def mock_start(*args, **kwargs):
m.start_was_called = True
return 0
m.stop = mock_stop
m.start = mock_start
status = m.restart()
self.assertEqual(status, 0)
self.assertEqual(m.stop_was_called, True)
self.assertEqual(m.start_was_called, True)
def test_reload(self):
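        # reload() should stop and then start each of the graceful-shutdown
        # *-server managers, always passing graceful=True regardless of the
        # graceful argument given by the caller.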
def do_test(graceful):
called = defaultdict(list)
def stop(self, **kwargs):
called[self].append(('stop', kwargs))
return 0
def start(self, **kwargs):
called[self].append(('start', kwargs))
return 0
m = manager.Manager(['*-server'])
expected_servers = set([server.server for server in m.servers])
self.assertEqual(len(expected_servers), 4)
for server in expected_servers:
self.assertIn(server, manager.GRACEFUL_SHUTDOWN_SERVERS)
with mock.patch('swift.common.manager.Manager.start', start):
with mock.patch('swift.common.manager.Manager.stop', stop):
status = m.reload(graceful=graceful)
self.assertEqual(status, 0)
self.assertEqual(4, len(called))
actual_servers = set()
for m, calls in called.items():
self.assertEqual(calls, [('stop', {'graceful': True}),
('start', {'graceful': True})])
actual_servers.update([server.server for server in m.servers])
self.assertEqual(expected_servers, actual_servers)
with mock.patch.object(manager, 'find_executable', lambda x: x):
do_test(graceful=True)
do_test(graceful=False) # graceful is forced regardless
@mock.patch.object(manager, 'verify_server',
side_effect=lambda server: 'error' not in server)
def test_force_reload(self, mock_verify):
m = manager.Manager(['test'])
m.reload_was_called = False
def mock_reload(*args, **kwargs):
m.reload_was_called = True
return 0
m.reload = mock_reload
status = m.force_reload()
self.assertEqual(status, 0)
self.assertEqual(m.reload_was_called, True)
@mock.patch.object(manager, 'verify_server',
side_effect=lambda server: 'error' not in server)
def test_get_command(self, mock_verify):
m = manager.Manager(['test'])
self.assertEqual(m.start, m.get_command('start'))
self.assertEqual(m.force_reload, m.get_command('force-reload'))
self.assertEqual(m.get_command('force-reload'),
m.get_command('force_reload'))
self.assertRaises(manager.UnknownCommandError, m.get_command,
'no_command')
self.assertRaises(manager.UnknownCommandError, m.get_command,
'__init__')
def test_list_commands(self):
for cmd, help in manager.Manager.list_commands():
method = getattr(manager.Manager, cmd.replace('-', '_'), None)
self.assertTrue(method, '%s is not a command' % cmd)
self.assertTrue(getattr(method, 'publicly_accessible', False))
self.assertEqual(method.__doc__.strip(), help)
@mock.patch.object(manager, 'verify_server',
side_effect=lambda server: 'error' not in server)
def test_run_command(self, mock_verify):
m = manager.Manager(['test'])
m.cmd_was_called = False
def mock_cmd(*args, **kwargs):
m.cmd_was_called = True
expected = {'kw1': True, 'kw2': False}
self.assertEqual(kwargs, expected)
return 0
mock_cmd.publicly_accessible = True
m.mock_cmd = mock_cmd
kwargs = {'kw1': True, 'kw2': False}
status = m.run_command('mock_cmd', **kwargs)
self.assertEqual(status, 0)
self.assertEqual(m.cmd_was_called, True)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/test_manager.py |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for swift.common.storage_policies """
import contextlib
import six
import logging
import unittest
import os
import mock
from functools import partial
from six.moves.configparser import ConfigParser
from tempfile import NamedTemporaryFile
from test.debug_logger import debug_logger
from test.unit import (
patch_policies, FakeRing, temptree, DEFAULT_TEST_EC_TYPE)
import swift.common.storage_policy
from swift.common.storage_policy import (
StoragePolicyCollection, POLICIES, PolicyError, parse_storage_policies,
reload_storage_policies, get_policy_string, split_policy_string,
BaseStoragePolicy, StoragePolicy, ECStoragePolicy, REPL_POLICY, EC_POLICY,
VALID_EC_TYPES, DEFAULT_EC_OBJECT_SEGMENT_SIZE, BindPortsCache)
from swift.common.ring import RingData
from swift.common.exceptions import RingLoadError
from pyeclib.ec_iface import ECDriver
class CapturingHandler(logging.Handler):
def __init__(self):
super(CapturingHandler, self).__init__()
self._records = []
def emit(self, record):
self._records.append(record)
@contextlib.contextmanager
def capture_logging(log_name):
captured = CapturingHandler()
logger = logging.getLogger(log_name)
logger.addHandler(captured)
try:
yield captured._records
finally:
logger.removeHandler(captured)
@BaseStoragePolicy.register('fake')
class FakeStoragePolicy(BaseStoragePolicy):
"""
Test StoragePolicy class - the only user at the moment is
test_validate_policies_type_invalid()
"""
def __init__(self, idx, name='', is_default=False, is_deprecated=False,
object_ring=None):
super(FakeStoragePolicy, self).__init__(
idx, name, is_default, is_deprecated, object_ring)
class TestStoragePolicies(unittest.TestCase):
def _conf(self, conf_str):
conf_str = "\n".join(line.strip() for line in conf_str.split("\n"))
if six.PY2:
conf = ConfigParser()
conf.readfp(six.StringIO(conf_str))
else:
conf = ConfigParser(strict=False)
conf.read_file(six.StringIO(conf_str))
return conf
def assertRaisesWithMessage(self, exc_class, message, f, *args, **kwargs):
try:
f(*args, **kwargs)
except exc_class as err:
err_msg = str(err)
self.assertTrue(message in err_msg, 'Error message %r did not '
'have expected substring %r' % (err_msg, message))
else:
self.fail('%r did not raise %s' % (message, exc_class.__name__))
def test_policy_baseclass_instantiate(self):
self.assertRaisesWithMessage(TypeError,
"Can't instantiate BaseStoragePolicy",
BaseStoragePolicy, 1, 'one')
@patch_policies([
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
StoragePolicy(2, 'two'),
StoragePolicy(3, 'three', is_deprecated=True),
ECStoragePolicy(10, 'ten', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4),
])
def test_swift_info(self):
# the deprecated 'three' should not exist in expect
expect = [{'aliases': 'zero', 'default': True, 'name': 'zero', },
{'aliases': 'two', 'name': 'two'},
{'aliases': 'one', 'name': 'one'},
{'aliases': 'ten', 'name': 'ten'}]
swift_info = POLICIES.get_policy_info()
self.assertEqual(sorted(expect, key=lambda k: k['name']),
sorted(swift_info, key=lambda k: k['name']))
@patch_policies
def test_get_policy_string(self):
self.assertEqual(get_policy_string('something', 0), 'something')
self.assertEqual(get_policy_string('something', None), 'something')
self.assertEqual(get_policy_string('something', ''), 'something')
self.assertEqual(get_policy_string('something', 1),
'something' + '-1')
self.assertRaises(PolicyError, get_policy_string, 'something', 99)
@patch_policies
def test_split_policy_string(self):
expectations = {
'something': ('something', POLICIES[0]),
'something-1': ('something', POLICIES[1]),
'tmp': ('tmp', POLICIES[0]),
'objects': ('objects', POLICIES[0]),
'tmp-1': ('tmp', POLICIES[1]),
'objects-1': ('objects', POLICIES[1]),
'objects-': PolicyError,
'objects-0': PolicyError,
'objects--1': ('objects-', POLICIES[1]),
'objects-+1': PolicyError,
'objects--': PolicyError,
'objects-foo': PolicyError,
'objects--bar': PolicyError,
'objects-+bar': PolicyError,
# questionable, demonstrated as inverse of get_policy_string
'objects+0': ('objects+0', POLICIES[0]),
'': ('', POLICIES[0]),
'0': ('0', POLICIES[0]),
'-1': ('', POLICIES[1]),
}
for policy_string, expected in expectations.items():
if expected == PolicyError:
try:
invalid = split_policy_string(policy_string)
except PolicyError:
continue # good
else:
self.fail('The string %r returned %r '
'instead of raising a PolicyError' %
(policy_string, invalid))
self.assertEqual(expected, split_policy_string(policy_string))
# should be inverse of get_policy_string
self.assertEqual(policy_string, get_policy_string(*expected))
def test_defaults(self):
self.assertGreater(len(POLICIES), 0)
# test class functions
default_policy = POLICIES.default
self.assertTrue(default_policy.is_default)
zero_policy = POLICIES.get_by_index(0)
self.assertTrue(zero_policy.idx == 0)
zero_policy_by_name = POLICIES.get_by_name(zero_policy.name)
self.assertTrue(zero_policy_by_name.idx == 0)
def test_storage_policy_repr(self):
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False),
ECStoragePolicy(10, 'ten',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
ECStoragePolicy(11, 'eleven',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3,
ec_duplication_factor=2)]
policies = StoragePolicyCollection(test_policies)
for policy in policies:
policy_repr = repr(policy)
self.assertTrue(policy.__class__.__name__ in policy_repr)
self.assertTrue('is_default=%s' % policy.is_default in policy_repr)
self.assertTrue('is_deprecated=%s' % policy.is_deprecated in
policy_repr)
self.assertTrue(policy.name in policy_repr)
if policy.policy_type == EC_POLICY:
self.assertTrue('ec_type=%s' % policy.ec_type in policy_repr)
self.assertTrue('ec_ndata=%s' % policy.ec_ndata in policy_repr)
self.assertTrue('ec_nparity=%s' %
policy.ec_nparity in policy_repr)
self.assertTrue('ec_segment_size=%s' %
policy.ec_segment_size in policy_repr)
if policy.ec_duplication_factor > 1:
self.assertTrue('ec_duplication_factor=%s' %
policy.ec_duplication_factor in
policy_repr)
collection_repr = repr(policies)
collection_repr_lines = collection_repr.splitlines()
self.assertTrue(
policies.__class__.__name__ in collection_repr_lines[0])
self.assertEqual(len(policies), len(collection_repr_lines[1:-1]))
for policy, line in zip(policies, collection_repr_lines[1:-1]):
self.assertTrue(repr(policy) in line)
with patch_policies(policies):
self.assertEqual(repr(POLICIES), collection_repr)
def test_validate_policies_defaults(self):
# 0 explicit default
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False)]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, test_policies[0])
self.assertEqual(policies.default.name, 'zero')
# non-zero explicit default
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', True)]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, test_policies[2])
self.assertEqual(policies.default.name, 'two')
# multiple defaults
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', True),
StoragePolicy(2, 'two', True)]
self.assertRaisesWithMessage(
PolicyError, 'Duplicate default', StoragePolicyCollection,
test_policies)
# nothing specified
test_policies = []
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, policies[0])
self.assertEqual(policies.default.name, 'Policy-0')
# no default specified with only policy index 0
test_policies = [StoragePolicy(0, 'zero')]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, policies[0])
# no default specified with multiple policies
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False)]
self.assertRaisesWithMessage(
PolicyError, 'Unable to find default policy',
StoragePolicyCollection, test_policies)
def test_deprecate_policies(self):
# deprecation specified
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False, is_deprecated=True)]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, test_policies[0])
self.assertEqual(policies.default.name, 'zero')
self.assertEqual(len(policies), 3)
# multiple policies requires default
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', False, is_deprecated=True),
StoragePolicy(2, 'two', False)]
self.assertRaisesWithMessage(
PolicyError, 'Unable to find default policy',
StoragePolicyCollection, test_policies)
def test_validate_policies_indexes(self):
# duplicate indexes
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(1, 'two', False)]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
def test_validate_policy_params(self):
StoragePolicy(0, 'name') # sanity
# bogus indexes
self.assertRaises(PolicyError, FakeStoragePolicy, 'x', 'name')
self.assertRaises(PolicyError, FakeStoragePolicy, -1, 'name')
# non-zero Policy-0
self.assertRaisesWithMessage(PolicyError, 'reserved',
FakeStoragePolicy, 1, 'policy-0')
# deprecate default
self.assertRaisesWithMessage(
PolicyError, 'Deprecated policy can not be default',
FakeStoragePolicy, 1, 'Policy-1', is_default=True,
is_deprecated=True)
# weird names
names = (
'',
'name_foo',
'name\nfoo',
'name foo',
u'name \u062a',
'name \xd8\xaa',
)
for name in names:
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
FakeStoragePolicy, 1, name)
def test_validate_policies_names(self):
# duplicate names
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'zero', False),
StoragePolicy(2, 'two', False)]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
def test_validate_policies_type_default(self):
# no type specified - make sure the policy is initialized to
# DEFAULT_POLICY_TYPE
test_policy = FakeStoragePolicy(0, 'zero', True)
self.assertEqual(test_policy.policy_type, 'fake')
def test_validate_policies_type_invalid(self):
class BogusStoragePolicy(FakeStoragePolicy):
policy_type = 'bogus'
# unsupported policy type - initialization with FakeStoragePolicy
self.assertRaisesWithMessage(PolicyError, 'Invalid type',
BogusStoragePolicy, 1, 'one')
def test_policies_type_attribute(self):
test_policies = [
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
StoragePolicy(2, 'two'),
StoragePolicy(3, 'three', is_deprecated=True),
ECStoragePolicy(10, 'ten', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.get_by_index(0).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(1).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(2).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(3).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(10).policy_type,
EC_POLICY)
def test_names_are_normalized(self):
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'ZERO', False)]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
policies = StoragePolicyCollection([StoragePolicy(0, 'zEro', True),
StoragePolicy(1, 'One', False)])
pol0 = policies[0]
pol1 = policies[1]
for name in ('zero', 'ZERO', 'zErO', 'ZeRo'):
self.assertEqual(pol0, policies.get_by_name(name))
self.assertEqual(policies.get_by_name(name).name, 'zEro')
for name in ('one', 'ONE', 'oNe', 'OnE'):
self.assertEqual(pol1, policies.get_by_name(name))
self.assertEqual(policies.get_by_name(name).name, 'One')
def test_wacky_int_names(self):
# checking duplicate on insert
test_policies = [StoragePolicy(0, '1', True, aliases='-1'),
StoragePolicy(1, '0', False)]
policies = StoragePolicyCollection(test_policies)
with self.assertRaises(PolicyError):
policies.get_by_name_or_index('0')
self.assertEqual(policies.get_by_name('1'), test_policies[0])
self.assertEqual(policies.get_by_index(0), test_policies[0])
with self.assertRaises(PolicyError):
policies.get_by_name_or_index('1')
self.assertEqual(policies.get_by_name('0'), test_policies[1])
self.assertEqual(policies.get_by_index(1), test_policies[1])
self.assertIsNone(policies.get_by_index(-1))
self.assertEqual(policies.get_by_name_or_index('-1'), test_policies[0])
def test_multiple_names(self):
# checking duplicate on insert
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False, aliases='zero')]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
        # checking correct retrieval using other names
test_policies = [StoragePolicy(0, 'zero', True, aliases='cero, kore'),
StoragePolicy(1, 'one', False, aliases='uno, tahi'),
StoragePolicy(2, 'two', False, aliases='dos, rua')]
policies = StoragePolicyCollection(test_policies)
for name in ('zero', 'cero', 'kore'):
self.assertEqual(policies.get_by_name(name), test_policies[0])
for name in ('two', 'dos', 'rua'):
self.assertEqual(policies.get_by_name(name), test_policies[2])
# Testing parsing of conf files/text
good_conf = self._conf("""
[storage-policy:0]
name = one
aliases = uno, tahi
default = yes
""")
policies = parse_storage_policies(good_conf)
self.assertEqual(policies.get_by_name('one'),
policies[0])
self.assertEqual(policies.get_by_name('one'),
policies.get_by_name('tahi'))
name_repeat_conf = self._conf("""
[storage-policy:0]
name = one
aliases = one
default = yes
""")
# Test on line below should not generate errors. Repeat of main
# name under aliases is permitted during construction
# but only because automated testing requires it.
policies = parse_storage_policies(name_repeat_conf)
extra_commas_conf = self._conf("""
[storage-policy:0]
name = one
aliases = ,,one, ,
default = yes
""")
# Extra blank entries should be silently dropped
policies = parse_storage_policies(extra_commas_conf)
bad_conf = self._conf("""
[storage-policy:0]
name = one
aliases = uno, uno
default = yes
""")
self.assertRaisesWithMessage(PolicyError,
'is already assigned to this policy',
parse_storage_policies, bad_conf)
def test_multiple_names_EC(self):
# checking duplicate names on insert
test_policies_ec = [
ECStoragePolicy(
0, 'ec8-2',
aliases='zeus, jupiter',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(replicas=8),
is_default=True),
ECStoragePolicy(
1, 'ec10-4',
aliases='ec8-2',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
object_ring=FakeRing(replicas=10))]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies_ec)
        # checking correct retrieval using other names
good_test_policies_EC = [
ECStoragePolicy(0, 'ec8-2', aliases='zeus, jupiter',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(replicas=10),
is_default=True),
ECStoragePolicy(1, 'ec10-4', aliases='athena, minerva',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
object_ring=FakeRing(replicas=14)),
ECStoragePolicy(2, 'ec4-2', aliases='poseidon, neptune',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2,
object_ring=FakeRing(replicas=6)),
ECStoragePolicy(3, 'ec4-2-dup', aliases='uzuki, rin',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2,
ec_duplication_factor=2,
object_ring=FakeRing(replicas=12)),
]
ec_policies = StoragePolicyCollection(good_test_policies_EC)
for name in ('ec8-2', 'zeus', 'jupiter'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[0])
for name in ('ec10-4', 'athena', 'minerva'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[1])
for name in ('ec4-2', 'poseidon', 'neptune'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[2])
for name in ('ec4-2-dup', 'uzuki', 'rin'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[3])
# Testing parsing of conf files/text
good_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = zeus, jupiter
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
[storage-policy:1]
name = ec10-4
aliases = poseidon, neptune
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
[storage-policy:2]
name = ec4-2-dup
aliases = uzuki, rin
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 4
ec_num_parity_fragments = 2
ec_duplication_factor = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
ec_policies = parse_storage_policies(good_ec_conf)
self.assertEqual(ec_policies.get_by_name('ec8-2'),
ec_policies[0])
self.assertEqual(ec_policies.get_by_name('ec10-4'),
ec_policies.get_by_name('poseidon'))
self.assertEqual(ec_policies.get_by_name('ec4-2-dup'),
ec_policies.get_by_name('uzuki'))
name_repeat_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = ec8-2
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
# Test on line below should not generate errors. Repeat of main
# name under aliases is permitted during construction
# but only because automated testing requires it.
ec_policies = parse_storage_policies(name_repeat_ec_conf)
bad_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = zeus, zeus
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'is already assigned to this policy',
parse_storage_policies, bad_ec_conf)
def test_add_remove_names(self):
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False)]
policies = StoragePolicyCollection(test_policies)
# add names
policies.add_policy_alias(1, 'tahi')
self.assertEqual(policies.get_by_name('tahi'), test_policies[1])
policies.add_policy_alias(2, 'rua', 'dos')
self.assertEqual(policies.get_by_name('rua'), test_policies[2])
self.assertEqual(policies.get_by_name('dos'), test_policies[2])
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
policies.add_policy_alias, 2, 'double\n')
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
policies.add_policy_alias, 2, '')
# try to add existing name
self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
policies.add_policy_alias, 2, 'two')
self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
policies.add_policy_alias, 1, 'two')
# remove name
policies.remove_policy_alias('tahi')
self.assertIsNone(policies.get_by_name('tahi'))
# remove only name
self.assertRaisesWithMessage(PolicyError,
'Policies must have at least one name.',
policies.remove_policy_alias, 'zero')
# remove non-existent name
self.assertRaisesWithMessage(PolicyError,
'No policy with name',
policies.remove_policy_alias, 'three')
# remove default name
policies.remove_policy_alias('two')
self.assertIsNone(policies.get_by_name('two'))
self.assertEqual(policies.get_by_index(2).name, 'rua')
# change default name to a new name
policies.change_policy_primary_name(2, 'two')
self.assertEqual(policies.get_by_name('two'), test_policies[2])
self.assertEqual(policies.get_by_index(2).name, 'two')
# change default name to an existing alias
policies.change_policy_primary_name(2, 'dos')
self.assertEqual(policies.get_by_index(2).name, 'dos')
# change default name to a bad new name
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
policies.change_policy_primary_name,
2, 'bad\nname')
# change default name to a name belonging to another policy
self.assertRaisesWithMessage(PolicyError,
'Other policy',
policies.change_policy_primary_name,
1, 'dos')
def test_deprecated_default(self):
bad_conf = self._conf("""
[storage-policy:1]
name = one
deprecated = yes
default = yes
""")
self.assertRaisesWithMessage(
PolicyError, "Deprecated policy can not be default",
parse_storage_policies, bad_conf)
def test_multiple_policies_with_no_policy_index_zero(self):
bad_conf = self._conf("""
[storage-policy:1]
name = one
default = yes
""")
# Policy-0 will not be implicitly added if other policies are defined
self.assertRaisesWithMessage(
PolicyError, "must specify a storage policy section "
"for policy index 0", parse_storage_policies, bad_conf)
@mock.patch.object(swift.common.storage_policy, 'VALID_EC_TYPES',
['isa_l_rs_vand', 'isa_l_rs_cauchy'])
@mock.patch('swift.common.storage_policy.ECDriver')
def test_known_bad_ec_config(self, mock_driver):
good_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_cauchy
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
""")
with capture_logging('swift.common.storage_policy') as records:
parse_storage_policies(good_conf)
mock_driver.assert_called_once()
mock_driver.reset_mock()
self.assertFalse([(r.levelname, r.msg) for r in records])
good_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_vand
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""")
with capture_logging('swift.common.storage_policy') as records:
parse_storage_policies(good_conf)
mock_driver.assert_called_once()
mock_driver.reset_mock()
self.assertFalse([(r.levelname, r.msg) for r in records])
bad_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_vand
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
""")
with capture_logging('swift.common.storage_policy') as records, \
self.assertRaises(PolicyError) as exc_mgr:
parse_storage_policies(bad_conf)
self.assertEqual(exc_mgr.exception.args[0],
'Storage policy bad-policy uses an EC '
'configuration known to harm data durability. This '
'policy MUST be deprecated.')
mock_driver.assert_not_called()
mock_driver.reset_mock()
self.assertEqual([r.levelname for r in records],
['WARNING'])
for msg in ('known to harm data durability',
'Any data in this policy should be migrated',
'https://bugs.launchpad.net/swift/+bug/1639691'):
self.assertIn(msg, records[0].msg)
slightly_less_bad_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_vand
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
deprecated = true
[storage-policy:1]
name = good-policy
policy_type = erasure_coding
ec_type = isa_l_rs_cauchy
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
default = true
""")
with capture_logging('swift.common.storage_policy') as records:
parse_storage_policies(slightly_less_bad_conf)
self.assertEqual(2, mock_driver.call_count)
mock_driver.reset_mock()
self.assertEqual([r.levelname for r in records],
['WARNING'])
for msg in ('known to harm data durability',
'Any data in this policy should be migrated',
'https://bugs.launchpad.net/swift/+bug/1639691'):
self.assertIn(msg, records[0].msg)
def test_no_default(self):
orig_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = one
default = yes
""")
policies = parse_storage_policies(orig_conf)
self.assertEqual(policies.default, policies[1])
self.assertEqual('zero', policies[0].name)
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = one
deprecated = yes
""")
        # multiple policies and no explicit default
self.assertRaisesWithMessage(
PolicyError, "Unable to find default",
parse_storage_policies, bad_conf)
good_conf = self._conf("""
[storage-policy:0]
name = Policy-0
default = yes
[storage-policy:1]
name = one
deprecated = yes
""")
policies = parse_storage_policies(good_conf)
self.assertEqual(policies.default, policies[0])
self.assertTrue(policies[1].is_deprecated)
def test_parse_storage_policies(self):
        # PolicyError when every policy, including policy 0, is deprecated
bad_conf = self._conf("""
[storage-policy:0]
name = zero
deprecated = yes
[storage-policy:1]
name = one
deprecated = yes
""")
self.assertRaisesWithMessage(
PolicyError, "Unable to find policy that's not deprecated",
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:-1]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x-1]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x:1]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:1]
name = zero
boo = berries
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid option',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:0]
name =
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:3]
name = Policy-0
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:1]
name = policY-0
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:0]
name = one
[storage-policy:1]
name = ONE
""")
self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:0]
name = good_stuff
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
# policy_type = erasure_coding
# missing ec_type, ec_num_data_fragments and ec_num_parity_fragments
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
""")
self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
parse_storage_policies, bad_conf)
# missing ec_type, but other options valid...
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""")
self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
parse_storage_policies, bad_conf)
# ec_type specified, but invalid...
bad_conf = self._conf("""
[storage-policy:0]
name = zero
default = yes
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = garbage_alg
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""")
self.assertRaisesWithMessage(PolicyError,
'Wrong ec_type garbage_alg for policy '
'ec10-4, should be one of "%s"' %
(', '.join(VALID_EC_TYPES)),
parse_storage_policies, bad_conf)
# missing and invalid ec_num_parity_fragments
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_parity_fragments',
parse_storage_policies, bad_conf)
for num_parity in ('-4', '0', 'x'):
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = %(num_parity)s
""" % {'ec_type': DEFAULT_TEST_EC_TYPE,
'num_parity': num_parity})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_parity_fragments',
parse_storage_policies, bad_conf)
# missing and invalid ec_num_data_fragments
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_parity_fragments = 4
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_data_fragments',
parse_storage_policies, bad_conf)
for num_data in ('-10', '0', 'x'):
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = %(num_data)s
ec_num_parity_fragments = 4
""" % {'num_data': num_data, 'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_data_fragments',
parse_storage_policies, bad_conf)
# invalid ec_object_segment_size
for segment_size in ('-4', '0', 'x'):
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_object_segment_size = %(segment_size)s
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""" % {'segment_size': segment_size,
'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_object_segment_size',
parse_storage_policies, bad_conf)
# Additional section added to ensure parser ignores other sections
conf = self._conf("""
[some-other-section]
foo = bar
[storage-policy:0]
name = zero
[storage-policy:5]
name = one
default = yes
[storage-policy:6]
name = duplicate-sections-are-ignored
[storage-policy:6]
name = apple
""")
policies = parse_storage_policies(conf)
self.assertEqual(True, policies.get_by_index(5).is_default)
self.assertEqual(False, policies.get_by_index(0).is_default)
self.assertEqual(False, policies.get_by_index(6).is_default)
self.assertEqual("object", policies.get_by_name("zero").ring_name)
self.assertEqual("object-5", policies.get_by_name("one").ring_name)
self.assertEqual("object-6", policies.get_by_name("apple").ring_name)
self.assertEqual(0, int(policies.get_by_name('zero')))
self.assertEqual(5, int(policies.get_by_name('one')))
self.assertEqual(6, int(policies.get_by_name('apple')))
self.assertEqual("zero", policies.get_by_index(0).name)
self.assertEqual("zero", policies.get_by_index("0").name)
self.assertEqual("one", policies.get_by_index(5).name)
self.assertEqual("apple", policies.get_by_index(6).name)
self.assertEqual("zero", policies.get_by_index(None).name)
self.assertEqual("zero", policies.get_by_index('').name)
self.assertEqual(policies.get_by_index(0), policies.legacy)
def test_reload_invalid_storage_policies(self):
conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:00]
name = double-zero
""")
with NamedTemporaryFile(mode='w+t') as f:
conf.write(f)
f.flush()
with mock.patch('swift.common.utils.SWIFT_CONF_FILE',
new=f.name):
try:
reload_storage_policies()
except SystemExit as e:
err_msg = str(e)
else:
self.fail('SystemExit not raised')
parts = [
'Invalid Storage Policy Configuration',
'Duplicate index',
]
for expected in parts:
self.assertTrue(
expected in err_msg, '%s was not in %s' % (expected,
err_msg))
def test_storage_policy_ordering(self):
test_policies = StoragePolicyCollection([
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(503, 'error'),
StoragePolicy(204, 'empty'),
StoragePolicy(404, 'missing'),
])
self.assertEqual([0, 204, 404, 503], [int(p) for p in
sorted(list(test_policies))])
p503 = test_policies[503]
self.assertTrue(501 < p503 < 507)
def test_storage_policies_as_dict_keys(self):
# We have tests that expect to be able to map policies
# to expected values in a dict; check that we can use
# policies as keys.
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False)]
policy_to_name_map = {p: p.name for p in test_policies}
self.assertEqual(sorted(policy_to_name_map.keys()), test_policies)
self.assertIs(test_policies[0], next(
p for p in policy_to_name_map.keys() if p.is_default))
for p in test_policies:
self.assertEqual(policy_to_name_map[p], p.name)
def test_get_object_ring(self):
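        # Rings are loaded lazily: get_object_ring() populates
        # policy.object_ring on first use, and later lookups reuse the cached
        # ring without constructing a new Ring.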
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False)]
policies = StoragePolicyCollection(test_policies)
class NamedFakeRing(FakeRing):
def __init__(self, swift_dir, reload_time=15, ring_name=None,
validation_hook=None):
self.ring_name = ring_name
super(NamedFakeRing, self).__init__()
with mock.patch('swift.common.storage_policy.Ring',
new=NamedFakeRing):
for policy in policies:
self.assertFalse(policy.object_ring)
ring = policies.get_object_ring(int(policy), '/path/not/used')
self.assertEqual(ring.ring_name, policy.ring_name)
self.assertTrue(policy.object_ring)
self.assertTrue(isinstance(policy.object_ring, NamedFakeRing))
def blow_up(*args, **kwargs):
raise Exception('kaboom!')
with mock.patch('swift.common.storage_policy.Ring', new=blow_up):
for policy in policies:
policy.load_ring('/path/not/used')
expected = policies.get_object_ring(int(policy),
'/path/not/used')
self.assertEqual(policy.object_ring, expected)
# bad policy index
self.assertRaises(PolicyError, policies.get_object_ring, 99,
'/path/not/used')
def test_bind_ports_cache(self):
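        # BindPortsCache.all_bind_ports_for_node() should collect the ports of
        # every ring device whose ip belongs to this node, and should re-read
        # the ring files only when their mtimes change.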
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False)]
my_ips = ['1.2.3.4', '2.3.4.5']
other_ips = ['3.4.5.6', '4.5.6.7']
bind_ip = my_ips[1]
devs_by_ring_name1 = {
'object': [ # 'aay'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6006},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
'port': 6007},
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6008},
None,
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6009}],
'object-1': [ # 'bee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6006}, # dupe
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
'port': 6010},
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6011},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6012}],
'object-2': [ # 'cee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6010}, # on our IP and a not-us IP
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
'port': 6013},
None,
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6014},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6015}],
}
devs_by_ring_name2 = {
'object': [ # 'aay'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6016},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6019}],
'object-1': [ # 'bee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6016}, # dupe
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6022}],
'object-2': [ # 'cee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6020},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6025}],
}
ring_files = [ring_name + '.ring.gz'
for ring_name in sorted(devs_by_ring_name1)]
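        # Stand-in for RingData.load(): serves the stubbed device list keyed
        # by the ring file's basename with the '.ring.gz' suffix stripped.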
def _fake_load(gz_path, stub_objs, metadata_only=False):
return RingData(
devs=stub_objs[os.path.basename(gz_path)[:-8]],
replica2part2dev_id=[],
part_shift=24)
with mock.patch(
'swift.common.storage_policy.RingData.load'
) as mock_ld, \
patch_policies(test_policies), \
mock.patch('swift.common.storage_policy.whataremyips') \
as mock_whataremyips, \
temptree(ring_files) as tempdir:
mock_whataremyips.return_value = my_ips
cache = BindPortsCache(tempdir, bind_ip)
self.assertEqual([
mock.call(bind_ip),
], mock_whataremyips.mock_calls)
mock_whataremyips.reset_mock()
mock_ld.side_effect = partial(_fake_load,
stub_objs=devs_by_ring_name1)
self.assertEqual(set([
6006, 6008, 6011, 6010, 6014,
]), cache.all_bind_ports_for_node())
self.assertEqual([
mock.call(os.path.join(tempdir, ring_files[0]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[1]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[2]),
metadata_only=True),
], mock_ld.mock_calls)
mock_ld.reset_mock()
mock_ld.side_effect = partial(_fake_load,
stub_objs=devs_by_ring_name2)
self.assertEqual(set([
6006, 6008, 6011, 6010, 6014,
]), cache.all_bind_ports_for_node())
self.assertEqual([], mock_ld.mock_calls)
# but when all the file mtimes are made different, it'll
# reload
for gz_file in [os.path.join(tempdir, n)
for n in ring_files]:
os.utime(gz_file, (88, 88))
self.assertEqual(set([
6016, 6020,
]), cache.all_bind_ports_for_node())
self.assertEqual([
mock.call(os.path.join(tempdir, ring_files[0]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[1]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[2]),
metadata_only=True),
], mock_ld.mock_calls)
mock_ld.reset_mock()
# Don't do something stupid like crash if a ring file is missing.
os.unlink(os.path.join(tempdir, 'object-2.ring.gz'))
self.assertEqual(set([
6016, 6020,
]), cache.all_bind_ports_for_node())
self.assertEqual([], mock_ld.mock_calls)
# whataremyips() is only called in the constructor
self.assertEqual([], mock_whataremyips.mock_calls)
def test_singleton_passthrough(self):
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False)]
with patch_policies(test_policies):
for policy in POLICIES:
self.assertEqual(POLICIES[int(policy)], policy)
def test_quorum_size_replication(self):
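        # For the ring sizes checked here the replication quorum matches
        # (replicas + 1) // 2.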
expected_sizes = {1: 1,
2: 1,
3: 2,
4: 2,
5: 3}
for n, expected in expected_sizes.items():
policy = StoragePolicy(0, 'zero',
object_ring=FakeRing(replicas=n))
self.assertEqual(policy.quorum, expected)
def test_quorum_size_erasure_coding(self):
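        # EC quorum is the data fragment count plus pyeclib's minimum parity
        # fragments needed, multiplied by the duplication factor.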
test_ec_policies = [
ECStoragePolicy(10, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2),
ECStoragePolicy(11, 'df10-6', ec_type='flat_xor_hd_4',
ec_ndata=10, ec_nparity=6),
ECStoragePolicy(12, 'ec4-2-dup', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2, ec_duplication_factor=2),
]
for ec_policy in test_ec_policies:
k = ec_policy.ec_ndata
expected_size = (
(k + ec_policy.pyeclib_driver.min_parity_fragments_needed())
* ec_policy.ec_duplication_factor
)
self.assertEqual(expected_size, ec_policy.quorum)
def test_validate_ring(self):
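        # load_ring() must reject a ring whose replica count differs from
        # ec_n_unique_fragments * ec_duplication_factor.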
test_policies = [
ECStoragePolicy(0, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
is_default=True),
ECStoragePolicy(1, 'ec10-4', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4),
ECStoragePolicy(2, 'ec4-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2),
ECStoragePolicy(3, 'ec4-2-2dup', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2,
ec_duplication_factor=2)
]
policies = StoragePolicyCollection(test_policies)
class MockRingData(object):
def __init__(self, num_replica):
self.replica_count = num_replica
def do_test(actual_load_ring_replicas):
for policy, ring_replicas in zip(policies,
actual_load_ring_replicas):
with mock.patch('swift.common.ring.ring.RingData.load',
return_value=MockRingData(ring_replicas)):
necessary_replica_num = (policy.ec_n_unique_fragments *
policy.ec_duplication_factor)
with mock.patch(
'swift.common.ring.ring.validate_configuration'):
msg = 'EC ring for policy %s needs to be configured ' \
'with exactly %d replicas.' % \
(policy.name, necessary_replica_num)
self.assertRaisesWithMessage(RingLoadError, msg,
policy.load_ring, 'mock')
        # first, do something completely different
do_test([8, 10, 7, 11])
# then again, closer to true, but fractional
do_test([9.9, 14.1, 5.99999, 12.000000001])
def test_storage_policy_get_info(self):
test_policies = [
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one', is_deprecated=True,
aliases='tahi, uno'),
ECStoragePolicy(10, 'ten',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
ECStoragePolicy(11, 'done', is_deprecated=True,
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
]
policies = StoragePolicyCollection(test_policies)
expected = {
# default replication
(0, True): {
'name': 'zero',
'aliases': 'zero',
'default': True,
'deprecated': False,
'diskfile_module': 'egg:swift#replication.fs',
'policy_type': REPL_POLICY
},
(0, False): {
'name': 'zero',
'aliases': 'zero',
'default': True,
},
# deprecated replication
(1, True): {
'name': 'one',
'aliases': 'one, tahi, uno',
'default': False,
'deprecated': True,
'diskfile_module': 'egg:swift#replication.fs',
'policy_type': REPL_POLICY
},
(1, False): {
'name': 'one',
'aliases': 'one, tahi, uno',
'deprecated': True,
},
# enabled ec
(10, True): {
'name': 'ten',
'aliases': 'ten',
'default': False,
'deprecated': False,
'diskfile_module': 'egg:swift#erasure_coding.fs',
'policy_type': EC_POLICY,
'ec_type': DEFAULT_TEST_EC_TYPE,
'ec_num_data_fragments': 10,
'ec_num_parity_fragments': 3,
'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
'ec_duplication_factor': 1,
},
(10, False): {
'name': 'ten',
'aliases': 'ten',
},
# deprecated ec
(11, True): {
'name': 'done',
'aliases': 'done',
'default': False,
'deprecated': True,
'diskfile_module': 'egg:swift#erasure_coding.fs',
'policy_type': EC_POLICY,
'ec_type': DEFAULT_TEST_EC_TYPE,
'ec_num_data_fragments': 10,
'ec_num_parity_fragments': 3,
'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
'ec_duplication_factor': 1,
},
(11, False): {
'name': 'done',
'aliases': 'done',
'deprecated': True,
},
# enabled ec with ec_duplication
(12, True): {
'name': 'twelve',
'aliases': 'twelve',
'default': False,
'deprecated': False,
'diskfile_module': 'egg:swift#erasure_coding.fs',
'policy_type': EC_POLICY,
'ec_type': DEFAULT_TEST_EC_TYPE,
'ec_num_data_fragments': 10,
'ec_num_parity_fragments': 3,
'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
'ec_duplication_factor': 2,
},
(12, False): {
'name': 'twelve',
'aliases': 'twelve',
},
}
self.maxDiff = None
for policy in policies:
expected_info = expected[(int(policy), True)]
self.assertEqual(policy.get_info(config=True), expected_info)
expected_info = expected[(int(policy), False)]
self.assertEqual(policy.get_info(config=False), expected_info)
def test_ec_fragment_size_cached(self):
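        # fragment_size should be computed once via
        # pyeclib_driver.get_segment_info() and then cached on the policy.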
policy = ECStoragePolicy(
0, 'ec2-1', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=2, ec_nparity=1, object_ring=FakeRing(replicas=3),
ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE, is_default=True)
ec_driver = ECDriver(ec_type=DEFAULT_TEST_EC_TYPE,
k=2, m=1)
expected_fragment_size = ec_driver.get_segment_info(
DEFAULT_EC_OBJECT_SEGMENT_SIZE,
DEFAULT_EC_OBJECT_SEGMENT_SIZE)['fragment_size']
with mock.patch.object(
policy.pyeclib_driver, 'get_segment_info') as fake:
fake.return_value = {
'fragment_size': expected_fragment_size}
for x in range(10):
self.assertEqual(expected_fragment_size,
policy.fragment_size)
# pyeclib_driver.get_segment_info is called only once
self.assertEqual(1, fake.call_count)
def test_get_diskfile_manager(self):
# verify unique diskfile manager instances are returned
policy = StoragePolicy(0, name='zero', is_default=True,
diskfile_module='replication.fs')
dfm = policy.get_diskfile_manager({'devices': 'sdb1'}, debug_logger())
self.assertEqual('sdb1', dfm.devices)
dfm = policy.get_diskfile_manager({'devices': 'sdb2'}, debug_logger())
self.assertEqual('sdb2', dfm.devices)
dfm2 = policy.get_diskfile_manager({'devices': 'sdb2'}, debug_logger())
self.assertEqual('sdb2', dfm2.devices)
self.assertIsNot(dfm, dfm2)
def test_get_diskfile_manager_custom_diskfile(self):
calls = []
is_policy_ok = True
class DFM(object):
def __init__(self, *args, **kwargs):
calls.append((args, kwargs))
@classmethod
def check_policy(cls, policy):
if not is_policy_ok:
raise ValueError("I am not ok")
policy = StoragePolicy(0, name='zero', is_default=True,
diskfile_module='thin_air.fs')
with mock.patch(
'swift.common.storage_policy.load_pkg_resource',
side_effect=lambda *a, **kw: DFM) as mock_load_pkg_resource:
dfm = policy.get_diskfile_manager('arg', kwarg='kwarg')
self.assertIsInstance(dfm, DFM)
mock_load_pkg_resource.assert_called_with(
'swift.diskfile', 'thin_air.fs')
self.assertEqual([(('arg',), {'kwarg': 'kwarg'})], calls)
calls = []
is_policy_ok = False
with mock.patch(
'swift.common.storage_policy.load_pkg_resource',
side_effect=lambda *a, **kw: DFM) as mock_load_pkg_resource:
with self.assertRaises(PolicyError) as cm:
policy.get_diskfile_manager('arg', kwarg='kwarg')
mock_load_pkg_resource.assert_called_with(
'swift.diskfile', 'thin_air.fs')
self.assertIn('Invalid diskfile_module thin_air.fs', str(cm.exception))
def test_get_diskfile_manager_invalid_policy_config(self):
bad_policy = StoragePolicy(0, name='zero', is_default=True,
diskfile_module='erasure_coding.fs')
with self.assertRaises(PolicyError) as cm:
bad_policy.get_diskfile_manager()
self.assertIn('Invalid diskfile_module erasure_coding.fs',
str(cm.exception))
bad_policy = ECStoragePolicy(0, name='one', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
diskfile_module='replication.fs')
with self.assertRaises(PolicyError) as cm:
bad_policy.get_diskfile_manager()
self.assertIn('Invalid diskfile_module replication.fs',
str(cm.exception))
bad_policy = StoragePolicy(0, name='zero', is_default=True,
diskfile_module='thin_air.fs')
with self.assertRaises(PolicyError) as cm:
bad_policy.get_diskfile_manager()
self.assertIn('Unable to load diskfile_module thin_air.fs',
str(cm.exception))
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/test_storage_policy.py |
# Copyright (c) 2010-2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
from swift.common.base_storage_server import BaseStorageServer
from tempfile import mkdtemp
from swift import __version__ as swift_version
from swift.common.swob import Request
from swift.common.utils import get_logger, public, replication
from shutil import rmtree
class FakeOPTIONS(BaseStorageServer):
server_type = 'test-server'
def __init__(self, conf, logger=None):
super(FakeOPTIONS, self).__init__(conf)
self.logger = logger or get_logger(conf, log_route='test-server')
class FakeANOTHER(FakeOPTIONS):
@public
def ANOTHER(self):
"""this is to test adding to allowed_methods"""
pass
@replication
@public
def REPLICATE(self):
"""this is to test replication_server"""
pass
@public
@replication
def REPLICATE2(self):
"""this is to test replication_server"""
pass
class TestBaseStorageServer(unittest.TestCase):
"""Test swift.common.base_storage_server"""
def setUp(self):
self.tmpdir = mkdtemp()
self.testdir = os.path.join(self.tmpdir,
'tmp_test_base_storage_server')
def tearDown(self):
"""Tear down for testing swift.common.base_storage_server"""
rmtree(self.tmpdir)
def test_server_type(self):
conf = {'devices': self.testdir, 'mount_check': 'false'}
baseserver = BaseStorageServer(conf)
msg = 'Storage nodes have not implemented the Server type.'
try:
baseserver.server_type
except NotImplementedError as e:
self.assertEqual(str(e), msg)
def test_allowed_methods(self):
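        # Methods decorated with @replication are excluded from
        # allowed_methods only when replication_server is explicitly false;
        # plain @public methods such as ANOTHER are always allowed.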
conf = {'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'}
# test what's available in the base class
allowed_methods_test = FakeOPTIONS(conf).allowed_methods
self.assertEqual(allowed_methods_test, ['OPTIONS'])
# test that a subclass can add allowed methods
allowed_methods_test = FakeANOTHER(conf).allowed_methods
allowed_methods_test.sort()
self.assertEqual(allowed_methods_test, [
'ANOTHER', 'OPTIONS'])
conf = {'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'true'}
# test what's available in the base class
allowed_methods_test = FakeOPTIONS(conf).allowed_methods
self.assertEqual(allowed_methods_test, ['OPTIONS'])
# test that a subclass can add allowed methods
allowed_methods_test = FakeANOTHER(conf).allowed_methods
self.assertEqual(allowed_methods_test, [
'ANOTHER', 'OPTIONS', 'REPLICATE', 'REPLICATE2'])
conf = {'devices': self.testdir, 'mount_check': 'false'}
# test what's available in the base class
allowed_methods_test = FakeOPTIONS(conf).allowed_methods
self.assertEqual(allowed_methods_test, ['OPTIONS'])
# test that a subclass can add allowed methods
allowed_methods_test = FakeANOTHER(conf).allowed_methods
allowed_methods_test.sort()
self.assertEqual(allowed_methods_test, [
'ANOTHER', 'OPTIONS', 'REPLICATE', 'REPLICATE2'])
def test_OPTIONS_error(self):
msg = 'Storage nodes have not implemented the Server type.'
conf = {'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'}
baseserver = BaseStorageServer(conf)
req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
try:
baseserver.OPTIONS(req)
except NotImplementedError as e:
self.assertEqual(str(e), msg)
def test_OPTIONS(self):
conf = {'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'}
req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = FakeOPTIONS(conf).OPTIONS(req)
self.assertEqual(resp.headers['Allow'], 'OPTIONS')
self.assertEqual(resp.headers['Server'],
'test-server/' + swift_version)
| swift-master | test/unit/common/test_base_storage_server.py |
# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Tests for `swift.common.linkat`'''
import ctypes
import unittest
import os
import mock
from uuid import uuid4
from tempfile import gettempdir
from swift.common.linkat import linkat
from swift.common.utils import O_TMPFILE
from test.unit import requires_o_tmpfile_support_in_tmp
class TestLinkat(unittest.TestCase):
def test_flags(self):
self.assertTrue(hasattr(linkat, 'AT_FDCWD'))
self.assertTrue(hasattr(linkat, 'AT_SYMLINK_FOLLOW'))
@mock.patch('swift.common.linkat.linkat._c_linkat', None)
def test_available(self):
self.assertFalse(linkat.available)
@requires_o_tmpfile_support_in_tmp
def test_errno(self):
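        # A failing linkat() call should surface as an IOError without leaving
        # a stale errno behind for later ctypes callers.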
with open('/dev/null', 'r') as fd:
self.assertRaises(IOError, linkat,
linkat.AT_FDCWD, "/proc/self/fd/%s" % (fd),
linkat.AT_FDCWD, "%s/testlinkat" % gettempdir(),
linkat.AT_SYMLINK_FOLLOW)
self.assertEqual(ctypes.get_errno(), 0)
@mock.patch('swift.common.linkat.linkat._c_linkat', None)
def test_unavailable(self):
self.assertRaises(EnvironmentError, linkat, 0, None, 0, None, 0)
def test_unavailable_in_libc(self):
class LibC(object):
def __init__(self):
self.linkat_retrieved = False
@property
def linkat(self):
self.linkat_retrieved = True
raise AttributeError
libc = LibC()
mock_cdll = mock.Mock(return_value=libc)
with mock.patch('ctypes.CDLL', new=mock_cdll):
# Force re-construction of a `Linkat` instance
# Something you're not supposed to do in actual code
new_linkat = type(linkat)()
self.assertFalse(new_linkat.available)
libc_name = ctypes.util.find_library('c')
mock_cdll.assert_called_once_with(libc_name, use_errno=True)
self.assertTrue(libc.linkat_retrieved)
@requires_o_tmpfile_support_in_tmp
def test_linkat_success(self):
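        # Typical O_TMPFILE flow: open an anonymous file in the temp dir, then
        # give it a name by linking /proc/self/fd/<fd> into the filesystem
        # with AT_SYMLINK_FOLLOW.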
fd = None
path = None
ret = -1
try:
fd = os.open(gettempdir(), O_TMPFILE | os.O_WRONLY)
path = os.path.join(gettempdir(), uuid4().hex)
ret = linkat(linkat.AT_FDCWD, "/proc/self/fd/%d" % (fd),
linkat.AT_FDCWD, path, linkat.AT_SYMLINK_FOLLOW)
self.assertEqual(ret, 0)
self.assertTrue(os.path.exists(path))
finally:
if fd:
os.close(fd)
if path and ret == 0:
# if linkat succeeded, remove file
os.unlink(path)
@mock.patch('swift.common.linkat.linkat._c_linkat')
def test_linkat_fd_not_integer(self, _mock_linkat):
self.assertRaises(TypeError, linkat,
"not_int", None, "not_int", None, 0)
self.assertFalse(_mock_linkat.called)
| swift-master | test/unit/common/test_linkat.py |
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Tests for swift.common.swob"
import datetime
import unittest
import re
import time
from io import BytesIO
import six
from six.moves.urllib.parse import quote
import swift.common.swob as swob
from swift.common import utils, exceptions
from test.unit.common.middleware.helpers import LeakTrackingIter
class TestHeaderEnvironProxy(unittest.TestCase):
def test_proxy(self):
environ = {}
proxy = swob.HeaderEnvironProxy(environ)
self.assertIs(environ, proxy.environ)
proxy['Content-Length'] = 20
proxy['Content-Type'] = 'text/plain'
proxy['Something-Else'] = 'somevalue'
# NB: WSGI strings
proxy['X-Object-Meta-Unicode-\xff-Bu\xc3\x9fe'] = '\xe2\x98\xb9'
self.assertEqual(proxy.environ, {
'CONTENT_LENGTH': '20',
'CONTENT_TYPE': 'text/plain',
'HTTP_SOMETHING_ELSE': 'somevalue',
'HTTP_X_OBJECT_META_UNICODE_\xff_BU\xc3\x9fE': '\xe2\x98\xb9'})
self.assertEqual(proxy['content-length'], '20')
self.assertEqual(proxy['content-type'], 'text/plain')
self.assertEqual(proxy['something-else'], 'somevalue')
self.assertEqual(set(['Something-Else',
'X-Object-Meta-Unicode-\xff-Bu\xc3\x9fE',
'Content-Length', 'Content-Type']),
set(proxy.keys()))
self.assertEqual(list(iter(proxy)), proxy.keys())
self.assertEqual(4, len(proxy))
def test_ignored_keys(self):
# Constructor doesn't normalize keys
key = 'wsgi.input'
environ = {key: ''}
proxy = swob.HeaderEnvironProxy(environ)
self.assertEqual([], list(iter(proxy)))
self.assertEqual([], proxy.keys())
self.assertEqual(0, len(proxy))
self.assertRaises(KeyError, proxy.__getitem__, key)
self.assertNotIn(key, proxy)
self.assertIn(key, proxy.environ)
self.assertIs(environ, proxy.environ)
proxy['Content-Type'] = 'text/plain'
self.assertEqual(['Content-Type'], list(iter(proxy)))
self.assertEqual(['Content-Type'], proxy.keys())
self.assertEqual(1, len(proxy))
self.assertEqual('text/plain', proxy['Content-Type'])
self.assertIn('Content-Type', proxy)
def test_del(self):
environ = {}
proxy = swob.HeaderEnvironProxy(environ)
proxy['Content-Length'] = 20
proxy['Content-Type'] = 'text/plain'
proxy['Something-Else'] = 'somevalue'
del proxy['Content-Length']
del proxy['Content-Type']
del proxy['Something-Else']
self.assertEqual(proxy.environ, {})
self.assertEqual(0, len(proxy))
with self.assertRaises(KeyError):
del proxy['Content-Length']
def test_contains(self):
environ = {}
proxy = swob.HeaderEnvironProxy(environ)
proxy['Content-Length'] = 20
proxy['Content-Type'] = 'text/plain'
proxy['Something-Else'] = 'somevalue'
self.assertTrue('content-length' in proxy)
self.assertTrue('content-type' in proxy)
self.assertTrue('something-else' in proxy)
def test_keys(self):
environ = {}
proxy = swob.HeaderEnvironProxy(environ)
proxy['Content-Length'] = 20
proxy['Content-Type'] = 'text/plain'
proxy['Something-Else'] = 'somevalue'
self.assertEqual(
set(proxy.keys()),
set(('Content-Length', 'Content-Type', 'Something-Else')))
class TestRange(unittest.TestCase):
def test_range(self):
swob_range = swob.Range('bytes=1-7')
self.assertEqual(swob_range.ranges[0], (1, 7))
def test_upsidedown_range(self):
swob_range = swob.Range('bytes=5-10')
self.assertEqual(swob_range.ranges_for_length(2), [])
def test_str(self):
for range_str in ('bytes=1-7', 'bytes=1-', 'bytes=-1',
'bytes=1-7,9-12', 'bytes=-7,9-'):
swob_range = swob.Range(range_str)
self.assertEqual(str(swob_range), range_str)
def test_ranges_for_length(self):
swob_range = swob.Range('bytes=1-7')
self.assertEqual(swob_range.ranges_for_length(10), [(1, 8)])
self.assertEqual(swob_range.ranges_for_length(5), [(1, 5)])
self.assertIsNone(swob_range.ranges_for_length(None))
def test_ranges_for_large_length(self):
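        # A suffix range far larger than the entity is clamped to the whole
        # object rather than going negative.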
swob_range = swob.Range('bytes=-100000000000000000000000')
self.assertEqual(swob_range.ranges_for_length(100), [(0, 100)])
def test_ranges_for_length_no_end(self):
swob_range = swob.Range('bytes=1-')
self.assertEqual(swob_range.ranges_for_length(10), [(1, 10)])
self.assertEqual(swob_range.ranges_for_length(5), [(1, 5)])
self.assertIsNone(swob_range.ranges_for_length(None))
# This used to freak out:
swob_range = swob.Range('bytes=100-')
self.assertEqual(swob_range.ranges_for_length(5), [])
self.assertIsNone(swob_range.ranges_for_length(None))
swob_range = swob.Range('bytes=4-6,100-')
self.assertEqual(swob_range.ranges_for_length(5), [(4, 5)])
def test_ranges_for_length_no_start(self):
swob_range = swob.Range('bytes=-7')
self.assertEqual(swob_range.ranges_for_length(10), [(3, 10)])
self.assertEqual(swob_range.ranges_for_length(5), [(0, 5)])
self.assertIsNone(swob_range.ranges_for_length(None))
swob_range = swob.Range('bytes=4-6,-100')
self.assertEqual(swob_range.ranges_for_length(5), [(4, 5), (0, 5)])
def test_ranges_for_length_multi(self):
swob_range = swob.Range('bytes=-20,4-')
self.assertEqual(len(swob_range.ranges_for_length(200)), 2)
        # the actual length is greater than each requested range
self.assertEqual(swob_range.ranges_for_length(200),
[(180, 200), (4, 200)])
swob_range = swob.Range('bytes=30-150,-10')
self.assertEqual(len(swob_range.ranges_for_length(200)), 2)
# the actual length lands in the middle of a range
self.assertEqual(swob_range.ranges_for_length(90),
[(30, 90), (80, 90)])
        # the actual length is greater than any of the ranges
self.assertEqual(swob_range.ranges_for_length(200),
[(30, 151), (190, 200)])
self.assertIsNone(swob_range.ranges_for_length(None))
def test_ranges_for_length_edges(self):
swob_range = swob.Range('bytes=0-1, -7')
self.assertEqual(swob_range.ranges_for_length(10),
[(0, 2), (3, 10)])
swob_range = swob.Range('bytes=-7, 0-1')
self.assertEqual(swob_range.ranges_for_length(10),
[(3, 10), (0, 2)])
swob_range = swob.Range('bytes=-7, 0-1')
self.assertEqual(swob_range.ranges_for_length(5),
[(0, 5), (0, 2)])
def test_ranges_for_length_overlapping(self):
# Fewer than 3 overlaps is okay
swob_range = swob.Range('bytes=10-19,15-24')
self.assertEqual(swob_range.ranges_for_length(100),
[(10, 20), (15, 25)])
swob_range = swob.Range('bytes=10-19,15-24,20-29')
self.assertEqual(swob_range.ranges_for_length(100),
[(10, 20), (15, 25), (20, 30)])
# Adjacent ranges, though suboptimal, don't overlap
swob_range = swob.Range('bytes=10-19,20-29,30-39')
self.assertEqual(swob_range.ranges_for_length(100),
[(10, 20), (20, 30), (30, 40)])
# Ranges that share a byte do overlap
swob_range = swob.Range('bytes=10-20,20-30,30-40,40-50')
self.assertEqual(swob_range.ranges_for_length(100), [])
# With suffix byte range specs (e.g. bytes=-2), make sure that we
# correctly determine overlapping-ness based on the entity length
swob_range = swob.Range('bytes=10-15,15-20,30-39,-9')
self.assertEqual(swob_range.ranges_for_length(100),
[(10, 16), (15, 21), (30, 40), (91, 100)])
self.assertEqual(swob_range.ranges_for_length(20), [])
def test_ranges_for_length_nonascending(self):
few_ranges = ("bytes=100-109,200-209,300-309,500-509,"
"400-409,600-609,700-709")
many_ranges = few_ranges + ",800-809"
swob_range = swob.Range(few_ranges)
self.assertEqual(swob_range.ranges_for_length(100000),
[(100, 110), (200, 210), (300, 310), (500, 510),
(400, 410), (600, 610), (700, 710)])
swob_range = swob.Range(many_ranges)
self.assertEqual(swob_range.ranges_for_length(100000), [])
def test_ranges_for_length_too_many(self):
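        # Exactly 50 ranges are honoured; one past that limit and
        # ranges_for_length() gives back an empty list.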
at_the_limit_ranges = (
"bytes=" + ",".join("%d-%d" % (x * 1000, x * 1000 + 10)
for x in range(50)))
too_many_ranges = at_the_limit_ranges + ",10000000-10000009"
rng = swob.Range(at_the_limit_ranges)
self.assertEqual(len(rng.ranges_for_length(1000000000)), 50)
rng = swob.Range(too_many_ranges)
self.assertEqual(rng.ranges_for_length(1000000000), [])
def test_range_invalid_syntax(self):
def _assert_invalid_range(range_value):
try:
swob.Range(range_value)
self.fail("Expected %r to be invalid, but wasn't" %
(range_value,))
except ValueError:
pass
"""
        All of the following cases should result in a ValueError exception:
        1. value does not start with bytes=
        2. range start is greater than the end, e.g. bytes=5-3
        3. range has neither start nor end, e.g. bytes=-
        4. range does not have a hyphen, e.g. bytes=45
        5. range value is non-numeric
        6. any combination of the above
"""
_assert_invalid_range(None)
_assert_invalid_range('nonbytes=0-')
_assert_invalid_range('nonbytes=foobar,10-2')
_assert_invalid_range('bytes=5-3')
_assert_invalid_range('bytes=-')
_assert_invalid_range('bytes=45')
_assert_invalid_range('bytes=foo-bar,3-5')
_assert_invalid_range('bytes=4-10,45')
_assert_invalid_range('bytes=foobar,3-5')
_assert_invalid_range('bytes=nonumber-5')
_assert_invalid_range('bytes=nonumber')
_assert_invalid_range('bytes=--1')
_assert_invalid_range('bytes=--0')
class TestMatch(unittest.TestCase):
def test_match(self):
match = swob.Match('"a", "b"')
self.assertEqual(match.tags, set(('a', 'b')))
self.assertIn('a', match)
self.assertIn('"a"', match)
self.assertNotIn('""a""', match)
self.assertIn('b', match)
self.assertNotIn('c', match)
self.assertNotIn(None, match)
self.assertEqual(repr(match), "Match('a, b')")
def test_match_star(self):
match = swob.Match('"a", "*"')
self.assertIn('a', match)
self.assertIn('"a"', match)
self.assertIn('""a""', match)
self.assertIn('b', match)
self.assertIn('c', match)
self.assertIn(None, match)
self.assertEqual(repr(match), "Match('*, a')")
def test_match_noquote(self):
match = swob.Match('a, b')
self.assertEqual(match.tags, set(('a', 'b')))
self.assertIn('a', match)
self.assertIn('"a"', match)
self.assertNotIn('""a""', match)
self.assertIn('b', match)
self.assertNotIn('c', match)
self.assertNotIn(None, match)
def test_match_no_optional_white_space(self):
match = swob.Match('"a","b"')
self.assertEqual(match.tags, set(('a', 'b')))
self.assertIn('a', match)
self.assertIn('"a"', match)
self.assertNotIn('""a""', match)
self.assertIn('b', match)
self.assertNotIn('c', match)
self.assertNotIn(None, match)
def test_match_lots_of_optional_white_space(self):
match = swob.Match('"a" , , "b" ')
self.assertEqual(match.tags, set(('a', 'b')))
self.assertIn('a', match)
self.assertIn('"a"', match)
self.assertNotIn('""a""', match)
self.assertIn('b', match)
self.assertNotIn('c', match)
self.assertNotIn(None, match)
class TestEtag(unittest.TestCase):
def test_normalize_etag(self):
expectations = {
'': '',
'"': '"',
'""': '',
'foo': 'foo',
'"bar"': 'bar',
'"baz': '"baz',
'buz"': 'buz"',
'"fuz""': 'fuz"',
u'\u2661': u'\u2661',
u'"\u2661"': u'\u2661',
u'"\u2661': u'"\u2661',
}
failures = []
for given, expected in expectations.items():
found = swob.normalize_etag(given)
if found != expected:
failures.append(
'given %r expected %r != %r' % (given, expected, found))
if failures:
self.fail('Some expectations failed:\n' + '\n'.join(failures))
def test_normalize_bytes(self):
some_etag = b'"some-etag"'
if six.PY2:
self.assertEqual('some-etag', swob.normalize_etag(some_etag))
else:
self.assertRaises(TypeError, swob.normalize_etag, some_etag)
class TestTransferEncoding(unittest.TestCase):
def test_is_chunked(self):
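        # Only a bare 'chunked' transfer-coding is accepted: 'gzip' alone is
        # an invalid value, and compound values are unsupported.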
headers = {}
self.assertFalse(swob.is_chunked(headers))
headers['Transfer-Encoding'] = 'chunked'
self.assertTrue(swob.is_chunked(headers))
headers['Transfer-Encoding'] = 'gzip,chunked'
try:
swob.is_chunked(headers)
except AttributeError as e:
self.assertEqual(str(e), "Unsupported Transfer-Coding header"
" value specified in Transfer-Encoding header")
else:
self.fail("Expected an AttributeError raised for 'gzip'")
headers['Transfer-Encoding'] = 'gzip'
try:
swob.is_chunked(headers)
except ValueError as e:
self.assertEqual(str(e), "Invalid Transfer-Encoding header value")
else:
self.fail("Expected a ValueError raised for 'gzip'")
headers['Transfer-Encoding'] = 'gzip,identity'
try:
swob.is_chunked(headers)
except AttributeError as e:
self.assertEqual(str(e), "Unsupported Transfer-Coding header"
" value specified in Transfer-Encoding header")
else:
self.fail("Expected an AttributeError raised for 'gzip,identity'")
class TestAccept(unittest.TestCase):
def test_accept_json(self):
for accept in ('application/json', 'application/json;q=1.0,*/*;q=0.9',
'*/*;q=0.9,application/json;q=1.0', 'application/*',
'text/*,application/json', 'application/*,text/*',
'application/json,text/xml'):
acc = swob.Accept(accept)
match = acc.best_match(['text/plain', 'application/json',
'application/xml', 'text/xml'])
self.assertEqual(match, 'application/json')
def test_accept_plain(self):
for accept in ('', 'text/plain', 'application/xml;q=0.8,*/*;q=0.9',
'*/*;q=0.9,application/xml;q=0.8', '*/*',
'text/plain,application/xml'):
acc = swob.Accept(accept)
match = acc.best_match(['text/plain', 'application/json',
'application/xml', 'text/xml'])
self.assertEqual(match, 'text/plain')
def test_accept_xml(self):
for accept in ('application/xml', 'application/xml;q=1.0,*/*;q=0.9',
'*/*;q=0.9,application/xml;q=1.0',
'application/xml;charset=UTF-8',
'application/xml;charset=UTF-8;qws="quoted with space"',
'application/xml; q=0.99 ; qws="quoted with space"'):
acc = swob.Accept(accept)
match = acc.best_match(['text/plain', 'application/xml',
'text/xml'])
self.assertEqual(match, 'application/xml')
def test_accept_invalid(self):
for accept in ('*', 'text/plain,,', 'some stuff',
'application/xml;q=1.0;q=1.1', 'text/plain,*',
'text /plain', 'text\x7f/plain',
'text/plain;a=b=c',
'text/plain;q=1;q=2',
'text/plain;q=not-a-number',
'text/plain; ubq="unbalanced " quotes"'):
acc = swob.Accept(accept)
with self.assertRaises(ValueError):
acc.best_match(['text/plain', 'application/xml', 'text/xml'])
def test_repr(self):
acc = swob.Accept("application/json")
self.assertEqual(repr(acc), "application/json")
class TestRequest(unittest.TestCase):
def test_blank(self):
req = swob.Request.blank(
'/', environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'text/plain'}, body='hi')
self.assertEqual(req.path_info, '/')
self.assertEqual(req.body, b'hi')
self.assertEqual(req.headers['Content-Type'], 'text/plain')
self.assertEqual(req.method, 'POST')
def test_blank_req_environ_property_args(self):
blank = swob.Request.blank
req = blank('/', method='PATCH')
self.assertEqual(req.method, 'PATCH')
self.assertEqual(req.environ['REQUEST_METHOD'], 'PATCH')
req = blank('/', referer='http://example.com')
self.assertEqual(req.referer, 'http://example.com')
self.assertEqual(req.referrer, 'http://example.com')
self.assertEqual(req.environ['HTTP_REFERER'], 'http://example.com')
self.assertEqual(req.headers['Referer'], 'http://example.com')
req = blank('/', script_name='/application')
self.assertEqual(req.script_name, '/application')
self.assertEqual(req.environ['SCRIPT_NAME'], '/application')
req = blank('/', host='www.example.com')
self.assertEqual(req.host, 'www.example.com')
self.assertEqual(req.environ['HTTP_HOST'], 'www.example.com')
self.assertEqual(req.headers['Host'], 'www.example.com')
req = blank('/', remote_addr='127.0.0.1')
self.assertEqual(req.remote_addr, '127.0.0.1')
self.assertEqual(req.environ['REMOTE_ADDR'], '127.0.0.1')
req = blank('/', remote_user='username')
self.assertEqual(req.remote_user, 'username')
self.assertEqual(req.environ['REMOTE_USER'], 'username')
req = blank('/', user_agent='curl/7.22.0 (x86_64-pc-linux-gnu)')
self.assertEqual(req.user_agent, 'curl/7.22.0 (x86_64-pc-linux-gnu)')
self.assertEqual(req.environ['HTTP_USER_AGENT'],
'curl/7.22.0 (x86_64-pc-linux-gnu)')
self.assertEqual(req.headers['User-Agent'],
'curl/7.22.0 (x86_64-pc-linux-gnu)')
req = blank('/', query_string='a=b&c=d')
self.assertEqual(req.query_string, 'a=b&c=d')
self.assertEqual(req.environ['QUERY_STRING'], 'a=b&c=d')
req = blank('/', if_match='*')
self.assertEqual(req.environ['HTTP_IF_MATCH'], '*')
self.assertEqual(req.headers['If-Match'], '*')
# multiple environ property kwargs
req = blank('/', method='PATCH', referer='http://example.com',
script_name='/application', host='www.example.com',
remote_addr='127.0.0.1', remote_user='username',
user_agent='curl/7.22.0 (x86_64-pc-linux-gnu)',
query_string='a=b&c=d', if_match='*')
self.assertEqual(req.method, 'PATCH')
self.assertEqual(req.referer, 'http://example.com')
self.assertEqual(req.script_name, '/application')
self.assertEqual(req.host, 'www.example.com')
self.assertEqual(req.remote_addr, '127.0.0.1')
self.assertEqual(req.remote_user, 'username')
self.assertEqual(req.user_agent, 'curl/7.22.0 (x86_64-pc-linux-gnu)')
self.assertEqual(req.query_string, 'a=b&c=d')
self.assertEqual(req.environ['QUERY_STRING'], 'a=b&c=d')
def test_invalid_req_environ_property_args(self):
# getter only property
try:
swob.Request.blank(
'/', host_url='http://example.com:8080/v1/a/c/o')
except TypeError as e:
self.assertEqual("got unexpected keyword argument 'host_url'",
str(e))
else:
self.fail("invalid req_environ_property didn't raise error!")
# regular attribute
try:
swob.Request.blank('/', _params_cache={'a': 'b'})
except TypeError as e:
self.assertEqual("got unexpected keyword "
"argument '_params_cache'", str(e))
else:
self.fail("invalid req_environ_property didn't raise error!")
# non-existent attribute
try:
swob.Request.blank('/', params_cache={'a': 'b'})
except TypeError as e:
self.assertEqual("got unexpected keyword "
"argument 'params_cache'", str(e))
else:
self.fail("invalid req_environ_property didn't raise error!")
# method
try:
swob.Request.blank(
'/', as_referer='GET http://example.com')
except TypeError as e:
self.assertEqual("got unexpected keyword "
"argument 'as_referer'", str(e))
else:
self.fail("invalid req_environ_property didn't raise error!")
def test_blank_path_info_precedence(self):
blank = swob.Request.blank
req = blank('/a')
self.assertEqual(req.path_info, '/a')
req = blank('/a', environ={'PATH_INFO': '/a/c'})
self.assertEqual(req.path_info, '/a/c')
req = blank('/a', environ={'PATH_INFO': '/a/c'}, path_info='/a/c/o')
self.assertEqual(req.path_info, '/a/c/o')
req = blank('/a', path_info='/a/c/o')
self.assertEqual(req.path_info, '/a/c/o')
def test_blank_body_precedence(self):
req = swob.Request.blank(
'/', environ={'REQUEST_METHOD': 'POST',
'wsgi.input': BytesIO(b'')},
headers={'Content-Type': 'text/plain'}, body='hi')
self.assertEqual(req.path_info, '/')
self.assertEqual(req.body, b'hi')
self.assertEqual(req.headers['Content-Type'], 'text/plain')
self.assertEqual(req.method, 'POST')
body_file = BytesIO(b'asdf')
req = swob.Request.blank(
'/', environ={'REQUEST_METHOD': 'POST',
'wsgi.input': BytesIO(b'')},
headers={'Content-Type': 'text/plain'}, body='hi',
body_file=body_file)
self.assertTrue(req.body_file is body_file)
req = swob.Request.blank(
'/', environ={'REQUEST_METHOD': 'POST',
'wsgi.input': BytesIO(b'')},
headers={'Content-Type': 'text/plain'}, body='hi',
content_length=3)
self.assertEqual(req.content_length, 3)
self.assertEqual(len(req.body), 2)
def test_blank_parsing(self):
req = swob.Request.blank('http://test.com/')
self.assertEqual(req.environ['wsgi.url_scheme'], 'http')
self.assertEqual(req.environ['SERVER_PORT'], '80')
self.assertEqual(req.environ['SERVER_NAME'], 'test.com')
req = swob.Request.blank('https://test.com:456/')
self.assertEqual(req.environ['wsgi.url_scheme'], 'https')
self.assertEqual(req.environ['SERVER_PORT'], '456')
req = swob.Request.blank('test.com/')
self.assertEqual(req.environ['wsgi.url_scheme'], 'http')
self.assertEqual(req.environ['SERVER_PORT'], '80')
self.assertEqual(req.environ['PATH_INFO'], 'test.com/')
self.assertRaises(TypeError, swob.Request.blank,
'ftp://test.com/')
def test_params(self):
req = swob.Request.blank('/?a=b&c=d')
self.assertEqual(req.params['a'], 'b')
self.assertEqual(req.params['c'], 'd')
new_params = {'e': 'f', 'g': 'h'}
req.params = new_params
self.assertDictEqual(new_params, req.params)
new_params = (('i', 'j'), ('k', 'l'))
req.params = new_params
self.assertDictEqual(dict(new_params), req.params)
def test_unicode_params(self):
# NB: all of these strings are WSGI strings
req = swob.Request.blank(
'/?\xe1\x88\xb4=%E1%88%B4&%FF=\xff')
self.assertEqual(req.params['\xff'], '\xff')
self.assertEqual(req.params['\xe1\x88\xb4'], '\xe1\x88\xb4')
new_params = {'\xff': '\xe1\x88\xb4', '\xe1\x88\xb4': '\xff'}
req.params = new_params
self.assertDictEqual(new_params, req.params)
self.assertIn('%FF=%E1%88%B4', req.environ['QUERY_STRING'])
self.assertIn('%E1%88%B4=%FF', req.environ['QUERY_STRING'])
# ...well, until we get to unicode that isn't WSGI-friendly
new_params = ((u'\u1234', u'\u1234'), )
with self.assertRaises(UnicodeEncodeError):
req.params = new_params
def test_timestamp_missing(self):
req = swob.Request.blank('/')
self.assertRaises(exceptions.InvalidTimestamp,
getattr, req, 'timestamp')
def test_timestamp_invalid(self):
req = swob.Request.blank(
'/', headers={'X-Timestamp': 'asdf'})
self.assertRaises(exceptions.InvalidTimestamp,
getattr, req, 'timestamp')
def test_timestamp(self):
req = swob.Request.blank(
'/', headers={'X-Timestamp': '1402447134.13507_00000001'})
expected = utils.Timestamp('1402447134.13507', offset=1)
self.assertEqual(req.timestamp, expected)
self.assertEqual(req.timestamp.normal, expected.normal)
self.assertEqual(req.timestamp.internal, expected.internal)
def test_path(self):
req = swob.Request.blank('/hi?a=b&c=d')
self.assertEqual(req.path, '/hi')
req = swob.Request.blank(
'/', environ={'SCRIPT_NAME': '/hi', 'PATH_INFO': '/there'})
self.assertEqual(req.path, '/hi/there')
def test_path_question_mark(self):
req = swob.Request.blank('/test%3Ffile')
# This tests that .blank unquotes the path when setting PATH_INFO
self.assertEqual(req.environ['PATH_INFO'], '/test?file')
# This tests that .path requotes it
self.assertEqual(req.path, '/test%3Ffile')
def test_path_info_pop(self):
req = swob.Request.blank('/hi/there')
self.assertEqual(req.path_info_pop(), 'hi')
self.assertEqual(req.path_info, '/there')
self.assertEqual(req.script_name, '/hi')
def test_bad_path_info_pop(self):
req = swob.Request.blank('blahblah')
self.assertIsNone(req.path_info_pop())
def test_path_info_pop_last(self):
req = swob.Request.blank('/last')
self.assertEqual(req.path_info_pop(), 'last')
self.assertEqual(req.path_info, '')
self.assertEqual(req.script_name, '/last')
def test_path_info_pop_none(self):
req = swob.Request.blank('/')
self.assertEqual(req.path_info_pop(), '')
self.assertEqual(req.path_info, '')
self.assertEqual(req.script_name, '/')
def test_copy_get(self):
req = swob.Request.blank(
'/hi/there', environ={'REQUEST_METHOD': 'POST'})
self.assertEqual(req.method, 'POST')
req2 = req.copy_get()
self.assertEqual(req2.method, 'GET')
def test_get_response(self):
def test_app(environ, start_response):
start_response('200 OK', [])
return [b'hi']
req = swob.Request.blank('/')
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, b'hi')
def test_401_unauthorized(self):
# No request environment
resp = swob.HTTPUnauthorized()
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
# Request environment
req = swob.Request.blank('/')
resp = swob.HTTPUnauthorized(request=req)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
def test_401_valid_account_path(self):
def test_app(environ, start_response):
start_response('401 Unauthorized', [])
return [b'hi']
# Request environment contains valid account in path
req = swob.Request.blank('/v1/account-name')
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual('Swift realm="account-name"',
resp.headers['Www-Authenticate'])
# Request environment contains valid account/container in path
req = swob.Request.blank('/v1/account-name/c')
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual('Swift realm="account-name"',
resp.headers['Www-Authenticate'])
def test_401_invalid_path(self):
def test_app(environ, start_response):
start_response('401 Unauthorized', [])
return [b'hi']
# Request environment contains bad path
req = swob.Request.blank('/random')
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual('Swift realm="unknown"',
resp.headers['Www-Authenticate'])
def test_401_non_keystone_auth_path(self):
def test_app(environ, start_response):
start_response('401 Unauthorized', [])
return [b'no creds in request']
# Request to get token
req = swob.Request.blank('/v1.0/auth')
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual('Swift realm="unknown"',
resp.headers['Www-Authenticate'])
# Other form of path
req = swob.Request.blank('/auth/v1.0')
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual('Swift realm="unknown"',
resp.headers['Www-Authenticate'])
def test_401_www_authenticate_exists(self):
def test_app(environ, start_response):
start_response('401 Unauthorized', {
'Www-Authenticate': 'Me realm="whatever"'})
return [b'no creds in request']
# Auth middleware sets own Www-Authenticate
req = swob.Request.blank('/auth/v1.0')
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual('Me realm="whatever"',
resp.headers['Www-Authenticate'])
def test_401_www_authenticate_is_quoted(self):
def test_app(environ, start_response):
start_response('401 Unauthorized', [])
return [b'hi']
hacker = 'account-name\n\n<b>foo<br>' # url injection test
quoted_hacker = quote(hacker)
req = swob.Request.blank('/v1/' + quoted_hacker)
self.assertIn(hacker, req.environ['PATH_INFO']) # sanity check
resp = req.get_response(test_app)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual('Swift realm="%s"' % quoted_hacker,
resp.headers['Www-Authenticate'])
def test_not_401(self):
# Other status codes should not have WWW-Authenticate in response
def test_app(environ, start_response):
start_response('200 OK', [])
return [b'hi']
req = swob.Request.blank('/')
resp = req.get_response(test_app)
self.assertNotIn('Www-Authenticate', resp.headers)
def test_properties(self):
req = swob.Request.blank('/hi/there', body='hi')
self.assertEqual(req.body, b'hi')
self.assertEqual(req.content_length, 2)
req.remote_addr = 'something'
self.assertEqual(req.environ['REMOTE_ADDR'], 'something')
req.body = 'whatever'
self.assertEqual(req.content_length, 8)
self.assertEqual(req.body, b'whatever')
self.assertEqual(req.method, 'GET')
req.range = 'bytes=1-7'
self.assertEqual(req.range.ranges[0], (1, 7))
self.assertIn('Range', req.headers)
req.range = None
self.assertNotIn('Range', req.headers)
def test_datetime_properties(self):
req = swob.Request.blank('/hi/there', body='hi')
req.if_unmodified_since = 0
self.assertTrue(isinstance(req.if_unmodified_since, datetime.datetime))
if_unmodified_since = req.if_unmodified_since
req.if_unmodified_since = if_unmodified_since
self.assertEqual(if_unmodified_since, req.if_unmodified_since)
req.if_unmodified_since = 'something'
self.assertEqual(req.headers['If-Unmodified-Since'], 'something')
self.assertIsNone(req.if_unmodified_since)
self.assertIn('If-Unmodified-Since', req.headers)
req.if_unmodified_since = None
self.assertNotIn('If-Unmodified-Since', req.headers)
too_big_date_list = list(datetime.datetime.max.timetuple())
too_big_date_list[0] += 1 # bump up the year
too_big_date = time.strftime(
"%a, %d %b %Y %H:%M:%S UTC", time.struct_time(too_big_date_list))
req.if_unmodified_since = too_big_date
self.assertIsNone(req.if_unmodified_since)
def test_bad_range(self):
req = swob.Request.blank('/hi/there', body='hi')
req.range = 'bad range'
self.assertIsNone(req.range)
def test_accept_header(self):
req = swob.Request({'REQUEST_METHOD': 'GET',
'PATH_INFO': '/',
'HTTP_ACCEPT': 'application/json'})
self.assertEqual(
req.accept.best_match(['application/json', 'text/plain']),
'application/json')
self.assertEqual(
req.accept.best_match(['text/plain', 'application/json']),
'application/json')
def test_swift_entity_path(self):
req = swob.Request.blank('/v1/a/c/o')
self.assertEqual(req.swift_entity_path, '/a/c/o')
req = swob.Request.blank('/v1/a/c')
self.assertEqual(req.swift_entity_path, '/a/c')
req = swob.Request.blank('/v1/a')
self.assertEqual(req.swift_entity_path, '/a')
req = swob.Request.blank('/v1')
self.assertIsNone(req.swift_entity_path)
def test_path_qs(self):
req = swob.Request.blank('/hi/there?hello=equal&acl')
self.assertEqual(req.path_qs, '/hi/there?hello=equal&acl')
req = swob.Request({'PATH_INFO': '/hi/there',
'QUERY_STRING': 'hello=equal&acl'})
self.assertEqual(req.path_qs, '/hi/there?hello=equal&acl')
def test_url(self):
req = swob.Request.blank('/hi/there?hello=equal&acl')
self.assertEqual(req.url,
'http://localhost/hi/there?hello=equal&acl')
def test_wsgify(self):
used_req = []
@swob.wsgify
def _wsgi_func(req):
used_req.append(req)
return swob.Response(b'200 OK')
req = swob.Request.blank('/hi/there')
resp = req.get_response(_wsgi_func)
self.assertEqual(used_req[0].path, '/hi/there')
self.assertEqual(resp.status_int, 200)
def test_wsgify_method(self):
class _wsgi_class(object):
def __init__(self):
self.used_req = []
@swob.wsgify
def __call__(self, req):
self.used_req.append(req)
return swob.Response(b'200 OK')
req = swob.Request.blank('/hi/there')
handler = _wsgi_class()
resp = req.get_response(handler)
self.assertIs(handler.used_req[0].environ, req.environ)
self.assertEqual(resp.status_int, 200)
def test_wsgify_raise(self):
used_req = []
@swob.wsgify
def _wsgi_func(req):
used_req.append(req)
raise swob.HTTPServerError()
req = swob.Request.blank('/hi/there')
resp = req.get_response(_wsgi_func)
self.assertEqual(used_req[0].path, '/hi/there')
self.assertEqual(resp.status_int, 500)
def test_split_path(self):
"""
Copied from utils.split_path
"""
def _test_split_path(path, minsegs=1, maxsegs=None, rwl=False):
req = swob.Request.blank(path)
return req.split_path(minsegs, maxsegs, rwl)
self.assertRaises(ValueError, _test_split_path, '')
self.assertRaises(ValueError, _test_split_path, '/')
self.assertRaises(ValueError, _test_split_path, '//')
self.assertEqual(_test_split_path('/a'), ['a'])
self.assertRaises(ValueError, _test_split_path, '//a')
self.assertEqual(_test_split_path('/a/'), ['a'])
self.assertRaises(ValueError, _test_split_path, '/a/c')
self.assertRaises(ValueError, _test_split_path, '//c')
self.assertRaises(ValueError, _test_split_path, '/a/c/')
self.assertRaises(ValueError, _test_split_path, '/a//')
self.assertRaises(ValueError, _test_split_path, '/a', 2)
self.assertRaises(ValueError, _test_split_path, '/a', 2, 3)
self.assertRaises(ValueError, _test_split_path, '/a', 2, 3, True)
self.assertEqual(_test_split_path('/a/c', 2), ['a', 'c'])
self.assertEqual(_test_split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, _test_split_path, '/a/c/o/r', 3, 3)
self.assertEqual(_test_split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEqual(_test_split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, _test_split_path, '/a', 5, 4)
self.assertEqual(_test_split_path('/a/c/', 2), ['a', 'c'])
self.assertEqual(_test_split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
_test_split_path('o%0an e', 2)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
try:
_test_split_path('o%0an e', 2, 3, True)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
def test_unicode_path(self):
# Byte sequences always make sense
req = swob.Request.blank(u'/\u2661'.encode('utf8'))
self.assertEqual(req.path, quote(u'/\u2661'.encode('utf-8')))
self.assertEqual(req.environ['PATH_INFO'], '/\xe2\x99\xa1')
req = swob.Request.blank('/')
req.path_info = u'/\u2661'.encode('utf8')
self.assertEqual(req.path, quote(u'/\u2661'.encode('utf-8')))
self.assertEqual(req.environ['PATH_INFO'], '/\xe2\x99\xa1')
if six.PY2:
# Unicode is encoded to UTF-8 on py2, to paper over deserialized
# JSON slipping into subrequests
req = swob.Request.blank(u'/\u2661')
self.assertEqual(req.path, quote(u'/\u2661'.encode('utf-8')))
self.assertEqual(req.environ['PATH_INFO'], '/\xe2\x99\xa1')
req = swob.Request.blank('/')
req.path_info = u'/\u2661'
self.assertEqual(req.path, quote(u'/\u2661'.encode('utf-8')))
self.assertEqual(req.environ['PATH_INFO'], '/\xe2\x99\xa1')
else:
# Arbitrary Unicode *is not* supported on py3 -- only latin-1
# encodable is supported, because PEP-3333.
with self.assertRaises(UnicodeEncodeError):
req = swob.Request.blank(u'/\u2661')
req = swob.Request.blank('/')
with self.assertRaises(UnicodeEncodeError):
req.path_info = u'/\u2661'
# Update didn't take
self.assertEqual(req.path, '/')
self.assertEqual(req.environ['PATH_INFO'], '/')
# Needs to be a "WSGI string"
req = swob.Request.blank('/\xe2\x99\xa1')
self.assertEqual(req.path, quote(u'/\u2661'.encode('utf-8')))
self.assertEqual(req.environ['PATH_INFO'], '/\xe2\x99\xa1')
req = swob.Request.blank('/')
req.path_info = '/\xe2\x99\xa1'
self.assertEqual(req.path, quote(u'/\u2661'.encode('utf-8')))
self.assertEqual(req.environ['PATH_INFO'], '/\xe2\x99\xa1')
def test_unicode_query(self):
# Bytes are always OK
req = swob.Request.blank('/')
encoded = u'\u2661'.encode('utf-8')
req.query_string = b'x=' + encoded
if six.PY2:
self.assertEqual(req.params['x'], encoded)
else:
self.assertEqual(req.params['x'], encoded.decode('latin1'))
if six.PY2:
# Unicode will be UTF-8-encoded on py2
req = swob.Request.blank('/')
req.query_string = u'x=\u2661'
self.assertEqual(req.params['x'], encoded)
else:
# ...but py3 requires "WSGI strings"
req = swob.Request.blank('/')
with self.assertRaises(UnicodeEncodeError):
req.query_string = u'x=\u2661'
self.assertEqual(req.params, {})
req = swob.Request.blank('/')
req.query_string = 'x=' + encoded.decode('latin-1')
self.assertEqual(req.params['x'], encoded.decode('latin-1'))
def test_url2(self):
pi = '/hi/there'
path = pi
req = swob.Request.blank(path)
sche = 'http'
exp_url = '%s://localhost%s' % (sche, pi)
self.assertEqual(req.url, exp_url)
qs = 'hello=equal&acl'
path = '%s?%s' % (pi, qs)
s, p = 'unit.test.example.com', '90'
req = swob.Request({'PATH_INFO': pi,
'QUERY_STRING': qs,
'SERVER_NAME': s,
'SERVER_PORT': p})
exp_url = '%s://%s:%s%s?%s' % (sche, s, p, pi, qs)
self.assertEqual(req.url, exp_url)
host = 'unit.test.example.com'
req = swob.Request({'PATH_INFO': pi,
'QUERY_STRING': qs,
'HTTP_HOST': host + ':80'})
exp_url = '%s://%s%s?%s' % (sche, host, pi, qs)
self.assertEqual(req.url, exp_url)
host = 'unit.test.example.com'
sche = 'https'
req = swob.Request({'PATH_INFO': pi,
'QUERY_STRING': qs,
'HTTP_HOST': host + ':443',
'wsgi.url_scheme': sche})
exp_url = '%s://%s%s?%s' % (sche, host, pi, qs)
self.assertEqual(req.url, exp_url)
host = 'unit.test.example.com:81'
req = swob.Request({'PATH_INFO': pi,
'QUERY_STRING': qs,
'HTTP_HOST': host,
'wsgi.url_scheme': sche})
exp_url = '%s://%s%s?%s' % (sche, host, pi, qs)
self.assertEqual(req.url, exp_url)
def test_as_referer(self):
pi = '/hi/there'
qs = 'hello=equal&acl'
sche = 'https'
host = 'unit.test.example.com:81'
req = swob.Request({'REQUEST_METHOD': 'POST',
'PATH_INFO': pi,
'QUERY_STRING': qs,
'HTTP_HOST': host,
'wsgi.url_scheme': sche})
exp_url = '%s://%s%s?%s' % (sche, host, pi, qs)
self.assertEqual(req.as_referer(), 'POST ' + exp_url)
def test_message_length_just_content_length(self):
req = swob.Request.blank(
u'/',
environ={'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/'})
self.assertIsNone(req.message_length())
req = swob.Request.blank(
u'/',
environ={'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/'},
body='x' * 42)
self.assertEqual(req.message_length(), 42)
req.headers['Content-Length'] = 'abc'
try:
req.message_length()
except ValueError as e:
self.assertEqual(str(e), "Invalid Content-Length header value")
else:
self.fail("Expected a ValueError raised for 'abc'")
def test_message_length_transfer_encoding(self):
req = swob.Request.blank(
u'/',
environ={'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/'},
headers={'transfer-encoding': 'chunked'},
body='x' * 42)
self.assertIsNone(req.message_length())
req.headers['Transfer-Encoding'] = 'gzip,chunked'
try:
req.message_length()
except AttributeError as e:
self.assertEqual(str(e), "Unsupported Transfer-Coding header"
" value specified in Transfer-Encoding header")
else:
self.fail("Expected an AttributeError raised for 'gzip'")
req.headers['Transfer-Encoding'] = 'gzip'
try:
req.message_length()
except ValueError as e:
self.assertEqual(str(e), "Invalid Transfer-Encoding header value")
else:
self.fail("Expected a ValueError raised for 'gzip'")
req.headers['Transfer-Encoding'] = 'gzip,identity'
try:
req.message_length()
except AttributeError as e:
self.assertEqual(str(e), "Unsupported Transfer-Coding header"
" value specified in Transfer-Encoding header")
else:
self.fail("Expected an AttributeError raised for 'gzip,identity'")
def test_allow_reserved_names(self):
req = swob.Request.blank('', headers={})
self.assertFalse(req.allow_reserved_names)
req = swob.Request.blank('', headers={
'X-Allow-Reserved-Names': 'true'})
self.assertFalse(req.allow_reserved_names)
req = swob.Request.blank('', headers={
'X-Backend-Allow-Reserved-Names': 'false'})
self.assertFalse(req.allow_reserved_names)
req = swob.Request.blank('', headers={
'X-Backend-Allow-Reserved-Names': 'true'})
self.assertTrue(req.allow_reserved_names)
class TestStatusMap(unittest.TestCase):
def test_status_map(self):
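        # status_map turns an integer status into a Response subclass that
        # can be instantiated and invoked directly as a WSGI application.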
response_args = []
def start_response(status, headers):
response_args.append(status)
response_args.append(headers)
resp_cls = swob.status_map[404]
resp = resp_cls()
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.title, 'Not Found')
body = b''.join(resp({}, start_response))
self.assertIn(b'The resource could not be found.', body)
self.assertEqual(response_args[0], '404 Not Found')
headers = dict(response_args[1])
self.assertEqual(headers['Content-Type'], 'text/html; charset=UTF-8')
self.assertTrue(int(headers['Content-Length']) > 0)
class TestResponse(unittest.TestCase):
def _get_response(self):
def test_app(environ, start_response):
start_response('200 OK', [])
return [b'hi']
req = swob.Request.blank('/')
return req.get_response(test_app)
def test_properties(self):
resp = self._get_response()
resp.location = 'something'
self.assertEqual(resp.location, 'something')
self.assertIn('Location', resp.headers)
resp.location = None
self.assertNotIn('Location', resp.headers)
resp.content_type = 'text/plain'
self.assertIn('Content-Type', resp.headers)
resp.content_type = None
self.assertNotIn('Content-Type', resp.headers)
def test_empty_body(self):
resp = self._get_response()
resp.body = b''
self.assertEqual(resp.body, b'')
def test_unicode_body(self):
resp = self._get_response()
with self.assertRaises(TypeError) as catcher:
resp.body = u'\N{SNOWMAN}'
self.assertEqual(str(catcher.exception),
'WSGI responses must be bytes')
def test_call_reifies_request_if_necessary(self):
"""
The actual bug was a HEAD response coming out with a body because the
Request object wasn't passed into the Response object's constructor.
The Response object's __call__ method should be able to reify a
Request object from the env it gets passed.
"""
tracking = {
'closed': 0,
'read': 0,
}
def mark_closed(*args):
tracking['closed'] += 1
def mark_read(*args):
tracking['read'] += 1
def test_app(environ, start_response):
start_response('200 OK', [])
body = [b'hi']
return LeakTrackingIter(body, mark_closed, mark_read, None)
req = swob.Request.blank('/')
req.method = 'HEAD'
status, headers, app_iter = req.call_application(test_app)
resp = swob.Response(status=status, headers=dict(headers),
app_iter=app_iter)
output_iter = resp(req.environ, lambda *_: None)
with utils.closing_if_possible(output_iter):
body = b''.join(output_iter)
self.assertEqual(body, b'')
self.assertEqual(tracking, {
'closed': 1,
'read': 1,
})
def test_call_preserves_closeability(self):
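        # Closing the app_iter returned by call_application() must stop the
        # underlying generator, so further next() calls raise StopIteration.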
def test_app(environ, start_response):
start_response('200 OK', [])
yield "igloo"
yield "shindig"
yield "macadamia"
yield "hullabaloo"
req = swob.Request.blank('/')
req.method = 'GET'
status, headers, app_iter = req.call_application(test_app)
iterator = iter(app_iter)
self.assertEqual('igloo', next(iterator))
self.assertEqual('shindig', next(iterator))
app_iter.close()
with self.assertRaises(StopIteration):
next(iterator)
def test_call_finds_nonempty_chunk(self):
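        # A start_response() issued before the first non-empty chunk can be
        # superseded; the status in effect once real body bytes appear is
        # what gets reported.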
def test_app(environ, start_response):
start_response('400 Bad Request', [])
yield ''
start_response('200 OK', [])
yield 'complete '
yield ''
yield 'response'
req = swob.Request.blank('/')
req.method = 'GET'
status, headers, app_iter = req.call_application(test_app)
self.assertEqual(status, '200 OK')
self.assertEqual(list(app_iter), ['complete ', '', 'response'])
def test_call_requires_that_start_response_is_called(self):
def test_app(environ, start_response):
yield 'response'
req = swob.Request.blank('/')
req.method = 'GET'
with self.assertRaises(RuntimeError) as mgr:
req.call_application(test_app)
self.assertEqual(mgr.exception.args[0],
'application never called start_response')
def test_location_rewrite(self):
def start_response(env, headers):
pass
req = swob.Request.blank(
'/', environ={'HTTP_HOST': 'somehost'})
resp = self._get_response()
resp.location = '/something'
# read response
b''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, 'http://somehost/something')
req = swob.Request.blank(
'/', environ={'HTTP_HOST': 'somehost:80'})
resp = self._get_response()
resp.location = '/something'
# read response
b''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, 'http://somehost/something')
req = swob.Request.blank(
'/', environ={'HTTP_HOST': 'somehost:443',
'wsgi.url_scheme': 'http'})
resp = self._get_response()
resp.location = '/something'
# read response
b''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, 'http://somehost:443/something')
req = swob.Request.blank(
'/', environ={'HTTP_HOST': 'somehost:443',
'wsgi.url_scheme': 'https'})
resp = self._get_response()
resp.location = '/something'
# read response
b''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, 'https://somehost/something')
def test_location_rewrite_no_host(self):
def start_response(env, headers):
pass
req = swob.Request.blank(
'/', environ={'SERVER_NAME': 'local', 'SERVER_PORT': 80})
del req.environ['HTTP_HOST']
resp = self._get_response()
resp.location = '/something'
# read response
b''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, 'http://local/something')
req = swob.Request.blank(
'/', environ={'SERVER_NAME': 'local', 'SERVER_PORT': 81})
del req.environ['HTTP_HOST']
resp = self._get_response()
resp.location = '/something'
# read response
b''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, 'http://local:81/something')
def test_location_no_rewrite(self):
def start_response(env, headers):
pass
req = swob.Request.blank(
'/', environ={'HTTP_HOST': 'somehost'})
resp = self._get_response()
resp.location = 'http://www.google.com/'
# read response
b''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, 'http://www.google.com/')
def test_location_no_rewrite_when_told_not_to(self):
def start_response(env, headers):
pass
req = swob.Request.blank(
'/', environ={'SERVER_NAME': 'local', 'SERVER_PORT': 81,
'swift.leave_relative_location': True})
del req.environ['HTTP_HOST']
resp = self._get_response()
resp.location = '/something'
# read response
b''.join(resp(req.environ, start_response))
self.assertEqual(resp.location, '/something')
def test_app_iter(self):
def start_response(env, headers):
pass
resp = self._get_response()
resp.app_iter = [b'a', b'b', b'c']
body = b''.join(resp({}, start_response))
self.assertEqual(body, b'abc')
def test_multi_ranges_wo_iter_ranges(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Length', '10')])
return [b'1234567890']
req = swob.Request.blank(
'/', headers={'Range': 'bytes=0-9,10-19,20-29'})
resp = req.get_response(test_app)
resp.conditional_response = True
resp.content_length = 10
# read response
b''.join(resp._response_iter(resp.app_iter, b''))
self.assertEqual(resp.status, '200 OK')
self.assertEqual(10, resp.content_length)
def test_single_range_wo_iter_range(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Length', '10')])
return [b'1234567890']
req = swob.Request.blank(
'/', headers={'Range': 'bytes=0-9'})
resp = req.get_response(test_app)
resp.conditional_response = True
resp.content_length = 10
# read response
b''.join(resp._response_iter(resp.app_iter, b''))
self.assertEqual(resp.status, '200 OK')
self.assertEqual(10, resp.content_length)
def test_multi_range_body(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Length', '4')])
return [b'abcd']
req = swob.Request.blank(
'/', headers={'Range': 'bytes=0-9,10-19,20-29'})
resp = req.get_response(test_app)
resp.conditional_response = True
resp.content_length = 100
resp.content_type = 'text/plain; charset=utf8'
content = b''.join(resp._response_iter(None,
(b'0123456789112345678'
b'92123456789')))
self.assertTrue(re.match(br'--([a-f0-9]{32})\r\n'
br'Content-Type: text/plain; charset=utf8\r\n'
br'Content-Range: bytes '
br'0-9/100\r\n\r\n0123456789\r\n'
br'--\1\r\n'
br'Content-Type: text/plain; charset=utf8\r\n'
br'Content-Range: bytes '
br'10-19/100\r\n\r\n1123456789\r\n'
br'--\1\r\n'
br'Content-Type: text/plain; charset=utf8\r\n'
br'Content-Range: bytes '
br'20-29/100\r\n\r\n2123456789\r\n'
br'--\1--', content))
def test_multi_response_iter(self):
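        # When the app_iter exposes app_iter_ranges(), swob hands the parsed
        # ranges, content type, boundary and size to it instead of slicing
        # the iterator itself.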
def test_app(environ, start_response):
start_response('200 OK', [('Content-Length', '10'),
('Content-Type', 'application/xml')])
return [b'0123456789']
app_iter_ranges_args = []
class App_iter(object):
def app_iter_ranges(self, ranges, content_type, boundary, size):
app_iter_ranges_args.append((ranges, content_type, boundary,
size))
for i in range(3):
yield (str(i) + 'fun').encode('ascii')
yield boundary
def __iter__(self):
for i in range(3):
yield (str(i) + 'fun').encode('ascii')
req = swob.Request.blank(
'/', headers={'Range': 'bytes=1-5,8-11'})
resp = req.get_response(test_app)
resp.conditional_response = True
resp.content_length = 12
content = b''.join(resp._response_iter(App_iter(), b''))
boundary = content[-32:]
self.assertEqual(content[:-32], b'0fun1fun2fun')
self.assertEqual(app_iter_ranges_args,
[([(1, 6), (8, 12)], b'application/xml',
boundary, 12)])
def test_range_body(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Length', '10')])
return [b'1234567890']
def start_response(env, headers):
pass
req = swob.Request.blank(
'/', headers={'Range': 'bytes=1-3'})
resp = swob.Response(
body=b'1234567890', request=req,
conditional_response=True)
body = b''.join(resp({}, start_response))
self.assertEqual(body, b'234')
self.assertEqual(resp.content_range, 'bytes 1-3/10')
self.assertEqual(resp.status, '206 Partial Content')
        # syntactically valid, but does not make sense, so a 416 is returned
        # in the next couple of cases.
req = swob.Request.blank(
'/', headers={'Range': 'bytes=-0'})
resp = req.get_response(test_app)
resp.conditional_response = True
body = b''.join(resp({}, start_response))
self.assertIn(b'The Range requested is not available', body)
self.assertEqual(resp.content_length, len(body))
self.assertEqual(resp.status, '416 Requested Range Not Satisfiable')
self.assertEqual(resp.content_range, 'bytes */10')
resp = swob.Response(
body=b'1234567890', request=req,
conditional_response=True)
body = b''.join(resp({}, start_response))
self.assertIn(b'The Range requested is not available', body)
self.assertEqual(resp.content_length, len(body))
self.assertEqual(resp.status, '416 Requested Range Not Satisfiable')
# Syntactically-invalid Range headers "MUST" be ignored
req = swob.Request.blank(
'/', headers={'Range': 'bytes=3-2'})
resp = req.get_response(test_app)
resp.conditional_response = True
body = b''.join(resp({}, start_response))
self.assertEqual(body, b'1234567890')
self.assertEqual(resp.status, '200 OK')
self.assertNotIn('Content-Range', resp.headers)
resp = swob.Response(
body=b'1234567890', request=req,
conditional_response=True)
body = b''.join(resp({}, start_response))
self.assertEqual(body, b'1234567890')
self.assertEqual(resp.status, '200 OK')
def test_content_type(self):
resp = self._get_response()
resp.content_type = 'text/plain; charset=utf8'
self.assertEqual(resp.content_type, 'text/plain')
def test_charset(self):
resp = self._get_response()
resp.content_type = 'text/plain; charset=utf8'
self.assertEqual(resp.charset, 'utf8')
resp.charset = 'utf16'
self.assertEqual(resp.charset, 'utf16')
def test_charset_content_type(self):
resp = swob.Response(
content_type='text/plain', charset='utf-8')
self.assertEqual(resp.charset, 'utf-8')
resp = swob.Response(
charset='utf-8', content_type='text/plain')
self.assertEqual(resp.charset, 'utf-8')
def test_etag(self):
resp = self._get_response()
resp.etag = 'hi'
self.assertEqual(resp.headers['Etag'], '"hi"')
self.assertEqual(resp.etag, 'hi')
self.assertIn('etag', resp.headers)
resp.etag = None
self.assertNotIn('etag', resp.headers)
def test_host_url_default(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'http'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '1234'
del env['HTTP_HOST']
self.assertEqual(resp.host_url, 'http://bob:1234')
def test_host_url_default_port_squelched(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'http'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '80'
del env['HTTP_HOST']
self.assertEqual(resp.host_url, 'http://bob')
def test_host_url_https(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'https'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '1234'
del env['HTTP_HOST']
self.assertEqual(resp.host_url, 'https://bob:1234')
def test_host_url_https_port_squelched(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'https'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '443'
del env['HTTP_HOST']
self.assertEqual(resp.host_url, 'https://bob')
def test_host_url_host_override(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'http'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '1234'
env['HTTP_HOST'] = 'someother'
self.assertEqual(resp.host_url, 'http://someother')
def test_host_url_host_port_override(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'http'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '1234'
env['HTTP_HOST'] = 'someother:5678'
self.assertEqual(resp.host_url, 'http://someother:5678')
def test_host_url_host_https(self):
resp = self._get_response()
env = resp.environ
env['wsgi.url_scheme'] = 'https'
env['SERVER_NAME'] = 'bob'
env['SERVER_PORT'] = '1234'
env['HTTP_HOST'] = 'someother:5678'
self.assertEqual(resp.host_url, 'https://someother:5678')
def test_507(self):
resp = swob.HTTPInsufficientStorage()
content = b''.join(resp._response_iter(resp.app_iter, resp._body))
self.assertEqual(
content,
b'<html><h1>Insufficient Storage</h1><p>There was not enough '
b'space to save the resource. Drive: unknown</p></html>')
resp = swob.HTTPInsufficientStorage(drive='sda1')
content = b''.join(resp._response_iter(resp.app_iter, resp._body))
self.assertEqual(
content,
b'<html><h1>Insufficient Storage</h1><p>There was not enough '
b'space to save the resource. Drive: sda1</p></html>')
def test_200_with_body_and_headers(self):
headers = {'Content-Length': '0'}
content = b'foo'
resp = swob.HTTPOk(body=content, headers=headers)
self.assertEqual(resp.body, content)
self.assertEqual(resp.content_length, len(content))
def test_init_with_body_headers_app_iter(self):
# body exists but no headers and no app_iter
body = b'ok'
resp = swob.Response(body=body)
self.assertEqual(resp.body, body)
self.assertEqual(resp.content_length, len(body))
# body and headers with 0 content_length exist but no app_iter
body = b'ok'
resp = swob.Response(
body=body, headers={'Content-Length': '0'})
self.assertEqual(resp.body, body)
self.assertEqual(resp.content_length, len(body))
# body and headers with content_length exist but no app_iter
body = b'ok'
resp = swob.Response(
body=body, headers={'Content-Length': '5'})
self.assertEqual(resp.body, body)
self.assertEqual(resp.content_length, len(body))
# body and headers with no content_length exist but no app_iter
body = b'ok'
resp = swob.Response(body=body, headers={})
self.assertEqual(resp.body, body)
self.assertEqual(resp.content_length, len(body))
# body, headers with content_length and app_iter exist
resp = swob.Response(
body=b'ok', headers={'Content-Length': '5'}, app_iter=iter([]))
self.assertEqual(resp.content_length, 5)
self.assertEqual(resp.body, b'')
# headers with content_length and app_iter exist but no body
resp = swob.Response(
headers={'Content-Length': '5'}, app_iter=iter([]))
self.assertEqual(resp.content_length, 5)
self.assertEqual(resp.body, b'')
# app_iter exists but no body and headers
resp = swob.Response(app_iter=iter([]))
self.assertIsNone(resp.content_length)
self.assertEqual(resp.body, b'')
class TestConditionalIfNoneMatch(unittest.TestCase):
def fake_app(self, environ, start_response):
start_response('200 OK', [('Etag', 'the-etag')])
return [b'hi']
def fake_start_response(*a, **kw):
pass
def test_simple_match(self):
# etag matches --> 304
req = swob.Request.blank(
'/', headers={'If-None-Match': 'the-etag'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 304)
self.assertEqual(body, b'')
def test_quoted_simple_match(self):
# double quotes don't matter
req = swob.Request.blank(
'/', headers={'If-None-Match': '"the-etag"'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 304)
self.assertEqual(body, b'')
def test_list_match(self):
# it works with lists of etags to match
req = swob.Request.blank(
'/', headers={'If-None-Match': '"bert", "the-etag", "ernie"'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 304)
self.assertEqual(body, b'')
def test_list_no_match(self):
# no matches --> whatever the original status was
req = swob.Request.blank(
'/', headers={'If-None-Match': '"bert", "ernie"'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, b'hi')
def test_match_star(self):
# "*" means match anything; see RFC 2616 section 14.24
req = swob.Request.blank(
'/', headers={'If-None-Match': '*'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 304)
self.assertEqual(body, b'')
class TestConditionalIfMatch(unittest.TestCase):
def fake_app(self, environ, start_response):
start_response('200 OK', [('Etag', 'the-etag')])
return [b'hi']
def fake_start_response(*a, **kw):
pass
def test_simple_match(self):
# if etag matches, proceed as normal
req = swob.Request.blank(
'/', headers={'If-Match': 'the-etag'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, b'hi')
def test_simple_conditional_etag_match(self):
# if etag matches, proceed as normal
req = swob.Request.blank(
'/', headers={'If-Match': 'not-the-etag'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
resp._conditional_etag = 'not-the-etag'
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, b'hi')
def test_quoted_simple_match(self):
# double quotes or not, doesn't matter
req = swob.Request.blank(
'/', headers={'If-Match': '"the-etag"'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, b'hi')
def test_no_match(self):
# no match --> 412
req = swob.Request.blank(
'/', headers={'If-Match': 'not-the-etag'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 412)
self.assertEqual(body, b'')
def test_simple_conditional_etag_no_match(self):
req = swob.Request.blank(
'/', headers={'If-Match': 'the-etag'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
resp._conditional_etag = 'not-the-etag'
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 412)
self.assertEqual(body, b'')
def test_match_star(self):
# "*" means match anything; see RFC 2616 section 14.24
req = swob.Request.blank(
'/', headers={'If-Match': '*'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, b'hi')
def test_match_star_on_404(self):
def fake_app_404(environ, start_response):
start_response('404 Not Found', [])
return [b'hi']
req = swob.Request.blank(
'/', headers={'If-Match': '*'})
resp = req.get_response(fake_app_404)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 412)
self.assertEqual(body, b'')
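# Covers If-Modified-Since handling against a fake app whose Last-Modified
# is Thu, 27 Feb 2014 03:29:37 GMT: earlier dates get the full response,
# the same or later dates get a 304.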
class TestConditionalIfModifiedSince(unittest.TestCase):
def fake_app(self, environ, start_response):
start_response(
'200 OK', [('Last-Modified', 'Thu, 27 Feb 2014 03:29:37 GMT')])
return [b'hi']
def fake_start_response(*a, **kw):
pass
def test_absent(self):
req = swob.Request.blank('/')
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, b'hi')
def test_before(self):
req = swob.Request.blank(
'/',
headers={'If-Modified-Since': 'Thu, 27 Feb 2014 03:29:36 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, b'hi')
def test_same(self):
req = swob.Request.blank(
'/',
headers={'If-Modified-Since': 'Thu, 27 Feb 2014 03:29:37 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 304)
self.assertEqual(body, b'')
def test_greater(self):
req = swob.Request.blank(
'/',
headers={'If-Modified-Since': 'Thu, 27 Feb 2014 03:29:38 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 304)
self.assertEqual(body, b'')
def test_out_of_range_is_ignored(self):
# All that datetime gives us is a ValueError or OverflowError when
# something is out of range (i.e. less than datetime.datetime.min or
# greater than datetime.datetime.max). Unfortunately, we can't
# distinguish between a date being too old and a date being too new,
# so the best we can do is ignore such headers.
max_date_list = list(datetime.datetime.max.timetuple())
max_date_list[0] += 1 # bump up the year
too_big_date_header = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", time.struct_time(max_date_list))
req = swob.Request.blank(
'/',
headers={'If-Modified-Since': too_big_date_header})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, b'hi')
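# Covers If-Unmodified-Since handling: dates before the fake app's
# Last-Modified get a 412, the same or later dates pass through unchanged.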
class TestConditionalIfUnmodifiedSince(unittest.TestCase):
def fake_app(self, environ, start_response):
start_response(
'200 OK', [('Last-Modified', 'Thu, 20 Feb 2014 03:29:37 GMT')])
return [b'hi']
def fake_start_response(*a, **kw):
pass
def test_absent(self):
req = swob.Request.blank('/')
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, b'hi')
def test_before(self):
req = swob.Request.blank(
'/',
headers={'If-Unmodified-Since': 'Thu, 20 Feb 2014 03:29:36 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 412)
self.assertEqual(body, b'')
def test_same(self):
req = swob.Request.blank(
'/',
headers={'If-Unmodified-Since': 'Thu, 20 Feb 2014 03:29:37 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, b'hi')
def test_greater(self):
req = swob.Request.blank(
'/',
headers={'If-Unmodified-Since': 'Thu, 20 Feb 2014 03:29:38 GMT'})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, b'hi')
def test_out_of_range_is_ignored(self):
# All that datetime gives us is a ValueError or OverflowError when
# something is out of range (i.e. less than datetime.datetime.min or
# greater than datetime.datetime.max). Unfortunately, we can't
# distinguish between a date being too old and a date being too new,
# so the best we can do is ignore such headers.
max_date_list = list(datetime.datetime.max.timetuple())
max_date_list[0] += 1 # bump up the year
too_big_date_header = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", time.struct_time(max_date_list))
req = swob.Request.blank(
'/',
headers={'If-Unmodified-Since': too_big_date_header})
resp = req.get_response(self.fake_app)
resp.conditional_response = True
body = b''.join(resp(req.environ, self.fake_start_response))
self.assertEqual(resp.status_int, 200)
self.assertEqual(body, b'hi')
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/test_swob.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import unittest
import os
from contextlib import contextmanager
import time
import pickle
import mock
from six.moves import urllib
from swift.common import direct_client
from swift.common.direct_client import DirectClientException
from swift.common.exceptions import ClientException
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import Timestamp, quote, md5
from swift.common.swob import RESPONSE_REASONS
from swift.common.storage_policy import POLICIES
from six.moves.http_client import HTTPException
from test.debug_logger import debug_logger
from test.unit import patch_policies
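# Stand-in for a bufferedhttp connection: it records the raw connect
# arguments (host, port, method, path, request headers, query string),
# returns a canned status/headers/body, and md5s anything sent so tests
# can verify the request payload via conn.etag.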
class FakeConn(object):
def __init__(self, status, headers=None, body='', **kwargs):
self.status = status
try:
self.reason = RESPONSE_REASONS[self.status][0]
except Exception:
self.reason = 'Fake'
self.body = body
self.resp_headers = HeaderKeyDict()
if headers:
self.resp_headers.update(headers)
self.etag = None
def _update_raw_call_args(self, *args, **kwargs):
capture_attrs = ('host', 'port', 'method', 'path', 'req_headers',
'query_string')
for attr, value in zip(capture_attrs, args[:len(capture_attrs)]):
setattr(self, attr, value)
return self
def getresponse(self):
if self.etag:
self.resp_headers['etag'] = str(self.etag.hexdigest())
if isinstance(self.status, Exception):
raise self.status
return self
def getheader(self, header, default=None):
return self.resp_headers.get(header, default)
def getheaders(self):
return self.resp_headers.items()
def read(self, amt=None):
if isinstance(self.body, io.BytesIO):
return self.body.read(amt)
elif amt is None:
return self.body
else:
            raise Exception('Not a StringIO entry')
def send(self, data):
if not self.etag:
self.etag = md5(usedforsecurity=False)
self.etag.update(data)
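# Patches swift.common.bufferedhttp.http_connect_raw (or a caller-supplied
# 'mocked' target) with a single FakeConn and yields it. Illustrative use,
# mirroring test_direct_delete_object below (the node/part names are just
# placeholders):
#
#     with mocked_http_conn(200) as conn:
#         direct_client.direct_delete_object(node, part, 'a', 'c', 'o')
#     self.assertEqual(conn.method, 'DELETE')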
@contextmanager
def mocked_http_conn(*args, **kwargs):
mocked = kwargs.pop('mocked', 'swift.common.bufferedhttp.http_connect_raw')
fake_conn = FakeConn(*args, **kwargs)
mock_http_conn = lambda *args, **kwargs: \
fake_conn._update_raw_call_args(*args, **kwargs)
with mock.patch(mocked, new=mock_http_conn):
yield fake_conn
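# Tests for swift.common.direct_client: each direct_* helper is exercised
# against a FakeConn (via mocked_http_conn or a patched http_connect_raw)
# and the resulting request method, path, headers and parsed response are
# checked.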
@patch_policies
class TestDirectClient(unittest.TestCase):
def setUp(self):
self.node = json.loads(json.dumps({ # json roundtrip to ring-like
'ip': '1.2.3.4', 'port': '6200', 'device': 'sda',
'replication_ip': '1.2.3.5', 'replication_port': '7000'}))
self.part = '0'
self.account = u'\u062a account'
self.container = u'\u062a container'
self.obj = u'\u062a obj/name'
self.account_path = '/sda/0/%s' % urllib.parse.quote(
self.account.encode('utf-8'))
self.container_path = '/sda/0/%s/%s' % tuple(
urllib.parse.quote(p.encode('utf-8')) for p in (
self.account, self.container))
self.obj_path = '/sda/0/%s/%s/%s' % tuple(
urllib.parse.quote(p.encode('utf-8')) for p in (
self.account, self.container, self.obj))
self.user_agent = 'direct-client %s' % os.getpid()
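        # Swap direct_client's Timeout for a no-op context manager so the
        # tests never arm a real timeout.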
class FakeTimeout(BaseException):
def __enter__(self):
return self
def __exit__(self, typ, value, tb):
pass
patcher = mock.patch.object(direct_client, 'Timeout', FakeTimeout)
patcher.start()
self.addCleanup(patcher.stop)
def test_gen_headers(self):
stub_user_agent = 'direct-client %s' % os.getpid()
headers = direct_client.gen_headers(add_ts=False)
self.assertEqual(dict(headers), {
'User-Agent': stub_user_agent,
'X-Backend-Allow-Reserved-Names': 'true',
})
with mock.patch('swift.common.utils.Timestamp.now',
return_value=Timestamp('123.45')):
headers = direct_client.gen_headers()
self.assertEqual(dict(headers), {
'User-Agent': stub_user_agent,
'X-Backend-Allow-Reserved-Names': 'true',
'X-Timestamp': '0000000123.45000',
})
headers = direct_client.gen_headers(hdrs_in={'x-timestamp': '15'})
self.assertEqual(dict(headers), {
'User-Agent': stub_user_agent,
'X-Backend-Allow-Reserved-Names': 'true',
'X-Timestamp': '15',
})
with mock.patch('swift.common.utils.Timestamp.now',
return_value=Timestamp('12345.6789')):
headers = direct_client.gen_headers(hdrs_in={'foo-bar': '63'})
self.assertEqual(dict(headers), {
'User-Agent': stub_user_agent,
'Foo-Bar': '63',
'X-Backend-Allow-Reserved-Names': 'true',
'X-Timestamp': '0000012345.67890',
})
hdrs_in = {'foo-bar': '55'}
headers = direct_client.gen_headers(hdrs_in, add_ts=False)
self.assertEqual(dict(headers), {
'User-Agent': stub_user_agent,
'Foo-Bar': '55',
'X-Backend-Allow-Reserved-Names': 'true',
})
with mock.patch('swift.common.utils.Timestamp.now',
return_value=Timestamp('12345')):
headers = direct_client.gen_headers(hdrs_in={'user-agent': '32'})
self.assertEqual(dict(headers), {
'User-Agent': '32',
'X-Backend-Allow-Reserved-Names': 'true',
'X-Timestamp': '0000012345.00000',
})
hdrs_in = {'user-agent': '47'}
headers = direct_client.gen_headers(hdrs_in, add_ts=False)
self.assertEqual(dict(headers), {
'User-Agent': '47',
'X-Backend-Allow-Reserved-Names': 'true',
})
for policy in POLICIES:
for add_ts in (True, False):
with mock.patch('swift.common.utils.Timestamp.now',
return_value=Timestamp('123456789')):
headers = direct_client.gen_headers(
{'X-Backend-Storage-Policy-Index': policy.idx},
add_ts=add_ts)
expected = {
'User-Agent': stub_user_agent,
'X-Backend-Storage-Policy-Index': str(policy.idx),
'X-Backend-Allow-Reserved-Names': 'true',
}
if add_ts:
expected['X-Timestamp'] = '0123456789.00000'
self.assertEqual(dict(headers), expected)
def test_direct_get_account(self):
def do_test(req_params):
stub_headers = HeaderKeyDict({
'X-Account-Container-Count': '1',
'X-Account-Object-Count': '1',
'X-Account-Bytes-Used': '1',
'X-Timestamp': '1234567890',
'X-PUT-Timestamp': '1234567890'})
body = b'[{"count": 1, "bytes": 20971520, "name": "c1"}]'
with mocked_http_conn(200, stub_headers, body) as conn:
resp_headers, resp = direct_client.direct_get_account(
self.node, self.part, self.account, **req_params)
try:
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, self.account_path)
self.assertEqual(conn.req_headers['user-agent'],
self.user_agent)
self.assertEqual(resp_headers, stub_headers)
self.assertEqual(json.loads(body), resp)
actual_params = conn.query_string.split('&')
exp_params = ['%s=%s' % (k, v)
for k, v in req_params.items()
if v is not None]
exp_params.append('format=json')
self.assertEqual(sorted(actual_params), sorted(exp_params))
except AssertionError as err:
self.fail('Failed with params %s: %s' % (req_params, err))
test_params = (dict(marker=marker, prefix=prefix, delimiter=delimiter,
limit=limit, end_marker=end_marker, reverse=reverse)
for marker in (None, 'my-marker')
for prefix in (None, 'my-prefix')
for delimiter in (None, 'my-delimiter')
for limit in (None, 1000)
for end_marker in (None, 'my-endmarker')
for reverse in (None, 'on'))
for params in test_params:
do_test(params)
def test_direct_client_exception(self):
stub_headers = {'X-Trans-Id': 'txb5f59485c578460f8be9e-0053478d09'}
body = 'a server error has occurred'
with mocked_http_conn(500, stub_headers, body):
with self.assertRaises(ClientException) as raised:
direct_client.direct_get_account(self.node, self.part,
self.account)
self.assertEqual(raised.exception.http_status, 500)
expected_err_msg_parts = (
'Account server %s:%s' % (self.node['ip'], self.node['port']),
'GET %r' % self.account_path,
'status 500',
)
for item in expected_err_msg_parts:
self.assertIn(item, str(raised.exception))
self.assertEqual(raised.exception.http_host, self.node['ip'])
self.assertEqual(raised.exception.http_port, self.node['port'])
self.assertEqual(raised.exception.http_device, self.node['device'])
self.assertEqual(raised.exception.http_status, 500)
self.assertEqual(raised.exception.http_reason, 'Internal Error')
self.assertEqual(raised.exception.http_headers, stub_headers)
def test_direct_get_account_no_content_does_not_parse_body(self):
headers = {
'X-Account-Container-Count': '1',
'X-Account-Object-Count': '1',
'X-Account-Bytes-Used': '1',
'X-Timestamp': '1234567890',
'X-Put-Timestamp': '1234567890'}
with mocked_http_conn(204, headers) as conn:
resp_headers, resp = direct_client.direct_get_account(
self.node, self.part, self.account)
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, self.account_path)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertDictEqual(resp_headers, headers)
self.assertEqual([], resp)
def test_direct_get_account_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_get_account(
self.node, self.part, self.account)
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, self.account_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('GET' in str(raised.exception))
def test_direct_delete_account(self):
part = '0'
account = 'a'
mock_path = 'swift.common.bufferedhttp.http_connect_raw'
with mock.patch(mock_path) as fake_connect:
fake_connect.return_value.getresponse.return_value.status = 200
direct_client.direct_delete_account(self.node, part, account)
args, kwargs = fake_connect.call_args
ip = args[0]
self.assertEqual(self.node['ip'], ip)
port = args[1]
self.assertEqual(self.node['port'], port)
method = args[2]
self.assertEqual('DELETE', method)
path = args[3]
self.assertEqual('/sda/0/a', path)
headers = args[4]
self.assertIn('X-Timestamp', headers)
self.assertIn('User-Agent', headers)
def test_direct_delete_account_replication_net(self):
part = '0'
account = 'a'
mock_path = 'swift.common.bufferedhttp.http_connect_raw'
with mock.patch(mock_path) as fake_connect:
fake_connect.return_value.getresponse.return_value.status = 200
direct_client.direct_delete_account(
self.node, part, account,
headers={'X-Backend-Use-Replication-Network': 't'})
args, kwargs = fake_connect.call_args
ip = args[0]
self.assertEqual(self.node['replication_ip'], ip)
self.assertNotEqual(self.node['ip'], ip)
port = args[1]
self.assertEqual(self.node['replication_port'], port)
self.assertNotEqual(self.node['port'], port)
method = args[2]
self.assertEqual('DELETE', method)
path = args[3]
self.assertEqual('/sda/0/a', path)
headers = args[4]
self.assertIn('X-Timestamp', headers)
self.assertIn('User-Agent', headers)
def test_direct_delete_account_failure(self):
part = '0'
account = 'a'
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_delete_account(self.node, part, account)
self.assertEqual(self.node['ip'], conn.host)
self.assertEqual(self.node['port'], conn.port)
self.assertEqual('DELETE', conn.method)
self.assertEqual('/sda/0/a', conn.path)
self.assertIn('X-Timestamp', conn.req_headers)
self.assertIn('User-Agent', conn.req_headers)
self.assertEqual(raised.exception.http_status, 500)
def test_direct_head_container(self):
headers = HeaderKeyDict(key='value')
with mocked_http_conn(200, headers) as conn:
resp = direct_client.direct_head_container(
self.node, self.part, self.account, self.container)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['user-agent'],
self.user_agent)
self.assertEqual(headers, resp)
def test_direct_head_container_replication_net(self):
headers = HeaderKeyDict(key='value')
with mocked_http_conn(200, headers) as conn:
resp = direct_client.direct_head_container(
self.node, self.part, self.account, self.container,
headers={'X-Backend-Use-Replication-Network': 'on'})
self.assertEqual(conn.host, self.node['replication_ip'])
self.assertEqual(conn.port, self.node['replication_port'])
self.assertNotEqual(conn.host, self.node['ip'])
self.assertNotEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['user-agent'],
self.user_agent)
self.assertEqual(headers, resp)
def test_direct_head_container_error(self):
headers = HeaderKeyDict(key='value')
with mocked_http_conn(503, headers) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_head_container(
self.node, self.part, self.account, self.container)
# check request
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertEqual(raised.exception.http_status, 503)
self.assertEqual(raised.exception.http_headers, headers)
self.assertTrue('HEAD' in str(raised.exception))
def test_direct_head_container_deleted(self):
important_timestamp = Timestamp.now().internal
headers = HeaderKeyDict({'X-Backend-Important-Timestamp':
important_timestamp})
with mocked_http_conn(404, headers) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_head_container(
self.node, self.part, self.account, self.container)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertEqual(raised.exception.http_status, 404)
self.assertEqual(raised.exception.http_headers, headers)
def test_direct_get_container(self):
def do_test(req_params):
headers = HeaderKeyDict({'key': 'value'})
body = (b'[{"hash": "8f4e3", "last_modified": "317260", '
b'"bytes": 209}]')
with mocked_http_conn(200, headers, body) as conn:
resp_headers, resp = direct_client.direct_get_container(
self.node, self.part, self.account, self.container,
**req_params)
try:
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['user-agent'],
self.user_agent)
self.assertEqual(headers, resp_headers)
self.assertEqual(json.loads(body), resp)
actual_params = conn.query_string.split('&')
exp_params = ['%s=%s' % (k, v)
for k, v in req_params.items()
if v is not None]
exp_params.append('format=json')
self.assertEqual(sorted(actual_params), sorted(exp_params))
except AssertionError as err:
self.fail('Failed with params %s: %s' % (req_params, err))
test_params = (dict(marker=marker, prefix=prefix, delimiter=delimiter,
limit=limit, end_marker=end_marker, reverse=reverse)
for marker in (None, 'my-marker')
for prefix in (None, 'my-prefix')
for delimiter in (None, 'my-delimiter')
for limit in (None, 1000)
for end_marker in (None, 'my-endmarker')
for reverse in (None, 'on'))
for params in test_params:
do_test(params)
def test_direct_get_container_no_content_does_not_decode_body(self):
headers = {}
body = ''
with mocked_http_conn(204, headers, body) as conn:
resp_headers, resp = direct_client.direct_get_container(
self.node, self.part, self.account, self.container)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertEqual(headers, resp_headers)
self.assertEqual([], resp)
def test_direct_get_container_with_extra_params(self):
def do_test(req_params, expected_params):
headers = HeaderKeyDict({'key': 'value'})
body = (b'[{"hash": "8f4e3", "last_modified": "317260", '
b'"bytes": 209}]')
with mocked_http_conn(200, headers, body) as conn:
resp_headers, resp = direct_client.direct_get_container(
self.node, self.part, self.account, self.container,
**req_params)
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(
conn.req_headers['user-agent'], self.user_agent)
self.assertEqual(headers, resp_headers)
self.assertEqual(json.loads(body), resp)
actual_params = conn.query_string.split('&')
exp_params = ['%s=%s' % (k, v)
for k, v in expected_params.items()]
exp_params.append('format=json')
self.assertEqual(sorted(actual_params), sorted(exp_params))
req_params = dict(marker='my-marker', prefix='my-prefix',
delimiter='my-delimiter',
limit=10, end_marker='my-endmarker', reverse='on',
extra_params={'states': 'updating',
'test': 'okay'})
expected_params = dict(marker='my-marker', prefix='my-prefix',
delimiter='my-delimiter', limit=10,
end_marker='my-endmarker', reverse='on',
states='updating', test='okay')
do_test(req_params, expected_params)
req_params = dict(marker='my-marker', prefix='my-prefix',
delimiter='my-delimiter',
limit=10, end_marker='my-endmarker', reverse='on',
extra_params={'states': None})
        expected_params = dict(marker='my-marker', prefix='my-prefix',
                               delimiter='my-delimiter', limit=10,
                               end_marker='my-endmarker', reverse='on')
        do_test(req_params, expected_params)
req_params = dict(marker='my-marker', prefix='my-prefix',
delimiter='my-delimiter',
limit=10, end_marker='my-endmarker', reverse='on',
extra_params={})
expected_params = dict(marker='my-marker', prefix='my-prefix',
delimiter='my-delimiter', limit=10,
end_marker='my-endmarker', reverse='on')
do_test(req_params, expected_params)
req_params = dict(marker='my-marker', prefix='my-prefix',
delimiter='my-delimiter',
limit=10, end_marker='my-endmarker', reverse='on',
extra_params={'states': 'updating',
'marker': 'others'})
with self.assertRaises(TypeError) as cm:
do_test(req_params, expected_params={})
self.assertIn('duplicate values for keyword arg: marker',
str(cm.exception))
req_params = dict(marker='my-marker', prefix='my-prefix',
delimiter='my-delimiter',
limit=10, end_marker='my-endmarker', reverse='on',
extra_params={'prefix': 'others'})
with self.assertRaises(TypeError) as cm:
do_test(req_params, expected_params=None)
self.assertIn('duplicate values for keyword arg: prefix',
str(cm.exception))
req_params = dict(marker='my-marker', delimiter='my-delimiter',
limit=10, end_marker='my-endmarker', reverse='on',
extra_params={'prefix': 'others'})
expected_params = dict(marker='my-marker', prefix='others',
delimiter='my-delimiter', limit=10,
end_marker='my-endmarker', reverse='on')
do_test(req_params, expected_params)
req_params = dict(marker='my-marker', prefix=None,
delimiter='my-delimiter',
limit=10, end_marker='my-endmarker', reverse='on',
extra_params={'prefix': 'others'})
expected_params = dict(marker='my-marker', prefix='others',
delimiter='my-delimiter', limit=10,
end_marker='my-endmarker', reverse='on')
do_test(req_params, expected_params)
req_params = dict(extra_params={'limit': 10, 'empty': '',
'test': True, 'zero': 0})
expected_params = dict(limit='10', empty='', test='True', zero='0')
do_test(req_params, expected_params)
def test_direct_delete_container(self):
with mocked_http_conn(200) as conn:
direct_client.direct_delete_container(
self.node, self.part, self.account, self.container)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.container_path)
def test_direct_delete_container_replication_net(self):
with mocked_http_conn(200) as conn:
direct_client.direct_delete_container(
self.node, self.part, self.account, self.container,
headers={'X-Backend-Use-Replication-Network': '1'})
self.assertEqual(conn.host, self.node['replication_ip'])
self.assertEqual(conn.port, self.node['replication_port'])
self.assertNotEqual(conn.host, self.node['ip'])
self.assertNotEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.container_path)
def test_direct_delete_container_with_timestamp(self):
# ensure timestamp is different from any that might be auto-generated
timestamp = Timestamp(time.time() - 100)
headers = {'X-Timestamp': timestamp.internal}
with mocked_http_conn(200) as conn:
direct_client.direct_delete_container(
self.node, self.part, self.account, self.container,
headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.container_path)
self.assertTrue('X-Timestamp' in conn.req_headers)
self.assertEqual(timestamp, conn.req_headers['X-Timestamp'])
def test_direct_delete_container_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_delete_container(
self.node, self.part, self.account, self.container)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('DELETE' in str(raised.exception))
def test_direct_put_container(self):
body = b'Let us begin with a quick introduction'
headers = {'x-foo': 'bar', 'Content-Length': str(len(body)),
'Content-Type': 'application/json',
'User-Agent': 'my UA'}
with mocked_http_conn(204) as conn:
rv = direct_client.direct_put_container(
self.node, self.part, self.account, self.container,
contents=body, headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['Content-Length'],
str(len(body)))
self.assertEqual(conn.req_headers['Content-Type'],
'application/json')
self.assertEqual(conn.req_headers['User-Agent'], 'my UA')
self.assertTrue('x-timestamp' in conn.req_headers)
self.assertEqual('bar', conn.req_headers.get('x-foo'))
self.assertEqual(
md5(body, usedforsecurity=False).hexdigest(),
conn.etag.hexdigest())
self.assertIsNone(rv)
def test_direct_put_container_chunked(self):
body = b'Let us begin with a quick introduction'
headers = {'x-foo': 'bar', 'Content-Type': 'application/json'}
with mocked_http_conn(204) as conn:
rv = direct_client.direct_put_container(
self.node, self.part, self.account, self.container,
contents=body, headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['Transfer-Encoding'], 'chunked')
self.assertEqual(conn.req_headers['Content-Type'],
'application/json')
self.assertTrue('x-timestamp' in conn.req_headers)
self.assertEqual('bar', conn.req_headers.get('x-foo'))
self.assertNotIn('Content-Length', conn.req_headers)
expected_sent = b'%0x\r\n%s\r\n0\r\n\r\n' % (len(body), body)
self.assertEqual(
md5(expected_sent, usedforsecurity=False).hexdigest(),
conn.etag.hexdigest())
self.assertIsNone(rv)
def test_direct_put_container_fail(self):
with mock.patch('swift.common.bufferedhttp.http_connect_raw',
side_effect=Exception('conn failed')):
with self.assertRaises(Exception) as cm:
direct_client.direct_put_container(
self.node, self.part, self.account, self.container)
self.assertEqual('conn failed', str(cm.exception))
with mocked_http_conn(Exception('resp failed')):
with self.assertRaises(Exception) as cm:
direct_client.direct_put_container(
self.node, self.part, self.account, self.container)
self.assertEqual('resp failed', str(cm.exception))
def test_direct_put_container_object(self):
headers = {'x-foo': 'bar'}
with mocked_http_conn(204) as conn:
rv = direct_client.direct_put_container_object(
self.node, self.part, self.account, self.container, self.obj,
headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.obj_path)
self.assertTrue('x-timestamp' in conn.req_headers)
self.assertEqual('bar', conn.req_headers.get('x-foo'))
self.assertIsNone(rv)
def test_direct_put_container_object_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_put_container_object(
self.node, self.part, self.account, self.container,
self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('PUT' in str(raised.exception))
def test_direct_post_container(self):
headers = {'x-foo': 'bar', 'User-Agent': 'my UA'}
with mocked_http_conn(204) as conn:
resp = direct_client.direct_post_container(
self.node, self.part, self.account, self.container,
headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'POST')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['User-Agent'], 'my UA')
self.assertTrue('x-timestamp' in conn.req_headers)
self.assertEqual('bar', conn.req_headers.get('x-foo'))
self.assertEqual(204, resp.status)
def test_direct_delete_container_object(self):
with mocked_http_conn(204) as conn:
rv = direct_client.direct_delete_container_object(
self.node, self.part, self.account, self.container, self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.obj_path)
self.assertIsNone(rv)
def test_direct_delete_container_obj_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_delete_container_object(
self.node, self.part, self.account, self.container,
self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('DELETE' in str(raised.exception))
def test_direct_head_object(self):
headers = HeaderKeyDict({'x-foo': 'bar'})
with mocked_http_conn(200, headers) as conn:
resp = direct_client.direct_head_object(
self.node, self.part, self.account, self.container,
self.obj, headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertEqual('bar', conn.req_headers.get('x-foo'))
self.assertIn('x-timestamp', conn.req_headers)
self.assertEqual(headers, resp)
def test_direct_head_object_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_head_object(
self.node, self.part, self.account, self.container,
self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('HEAD' in str(raised.exception))
def test_direct_head_object_not_found(self):
important_timestamp = Timestamp.now().internal
stub_headers = {'X-Backend-Important-Timestamp': important_timestamp}
with mocked_http_conn(404, headers=stub_headers) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_head_object(
self.node, self.part, self.account, self.container,
self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 404)
self.assertEqual(
raised.exception.http_headers['x-backend-important-timestamp'],
important_timestamp)
def test_direct_get_object(self):
contents = io.BytesIO(b'123456')
with mocked_http_conn(200, body=contents) as conn:
resp_header, obj_body = direct_client.direct_get_object(
self.node, self.part, self.account, self.container, self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(obj_body, contents.getvalue())
def test_direct_get_object_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_get_object(
self.node, self.part,
self.account, self.container, self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('GET' in str(raised.exception))
def test_direct_get_object_chunks(self):
contents = io.BytesIO(b'123456')
with mocked_http_conn(200, body=contents) as conn:
resp_header, obj_body = direct_client.direct_get_object(
self.node, self.part, self.account, self.container, self.obj,
resp_chunk_size=2)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual('GET', conn.method)
self.assertEqual(self.obj_path, conn.path)
self.assertEqual([b'12', b'34', b'56'], list(obj_body))
def test_direct_post_object(self):
headers = {'Key': 'value'}
resp_headers = []
with mocked_http_conn(200, resp_headers) as conn:
direct_client.direct_post_object(
self.node, self.part, self.account, self.container, self.obj,
headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'POST')
self.assertEqual(conn.path, self.obj_path)
for header in headers:
self.assertEqual(conn.req_headers[header], headers[header])
def test_direct_post_object_error(self):
headers = {'Key': 'value'}
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_post_object(
self.node, self.part, self.account, self.container,
self.obj, headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'POST')
self.assertEqual(conn.path, self.obj_path)
for header in headers:
self.assertEqual(conn.req_headers[header], headers[header])
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertTrue('x-timestamp' in conn.req_headers)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('POST' in str(raised.exception))
def test_direct_delete_object(self):
with mocked_http_conn(200) as conn:
resp = direct_client.direct_delete_object(
self.node, self.part, self.account, self.container, self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.obj_path)
self.assertIsNone(resp)
def test_direct_delete_object_with_timestamp(self):
# ensure timestamp is different from any that might be auto-generated
timestamp = Timestamp(time.time() - 100)
headers = {'X-Timestamp': timestamp.internal}
with mocked_http_conn(200) as conn:
direct_client.direct_delete_object(
self.node, self.part, self.account, self.container, self.obj,
headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.obj_path)
self.assertTrue('X-Timestamp' in conn.req_headers)
self.assertEqual(timestamp, conn.req_headers['X-Timestamp'])
def test_direct_delete_object_error(self):
with mocked_http_conn(503) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_delete_object(
self.node, self.part, self.account, self.container,
self.obj)
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 503)
self.assertTrue('DELETE' in str(raised.exception))
def test_direct_get_suffix_hashes(self):
data = {'a83': 'c130a2c17ed45102aada0f4eee69494ff'}
body = pickle.dumps(data)
with mocked_http_conn(200, {}, body) as conn:
resp = direct_client.direct_get_suffix_hashes(self.node,
self.part, ['a83'])
self.assertEqual(conn.method, 'REPLICATE')
self.assertEqual(conn.path, '/sda/0/a83')
self.assertEqual(conn.host, self.node['replication_ip'])
self.assertEqual(conn.port, self.node['replication_port'])
self.assertEqual(data, resp)
def _test_direct_get_suffix_hashes_fail(self, status_code):
with mocked_http_conn(status_code):
with self.assertRaises(DirectClientException) as cm:
direct_client.direct_get_suffix_hashes(
self.node, self.part, ['a83', 'b52'])
self.assertIn('REPLICATE', cm.exception.args[0])
self.assertIn(quote('/%s/%s/a83-b52'
% (self.node['device'], self.part)),
cm.exception.args[0])
self.assertIn(self.node['replication_ip'], cm.exception.args[0])
self.assertIn(self.node['replication_port'], cm.exception.args[0])
self.assertEqual(self.node['replication_ip'], cm.exception.http_host)
self.assertEqual(self.node['replication_port'], cm.exception.http_port)
self.assertEqual(self.node['device'], cm.exception.http_device)
self.assertEqual(status_code, cm.exception.http_status)
def test_direct_get_suffix_hashes_503(self):
self._test_direct_get_suffix_hashes_fail(503)
def test_direct_get_suffix_hashes_507(self):
self._test_direct_get_suffix_hashes_fail(507)
def test_direct_put_object_with_content_length(self):
contents = io.BytesIO(b'123456')
with mocked_http_conn(200) as conn:
resp = direct_client.direct_put_object(
self.node, self.part, self.account, self.container, self.obj,
contents, 6)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(
md5(b'123456', usedforsecurity=False).hexdigest(),
resp)
def test_direct_put_object_fail(self):
contents = io.BytesIO(b'123456')
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_put_object(
self.node, self.part, self.account, self.container,
self.obj, contents)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 500)
def test_direct_put_object_chunked(self):
contents = io.BytesIO(b'123456')
with mocked_http_conn(200) as conn:
resp = direct_client.direct_put_object(
self.node, self.part, self.account, self.container, self.obj,
contents)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(
md5(b'6\r\n123456\r\n0\r\n\r\n',
usedforsecurity=False).hexdigest(),
resp)
def test_direct_put_object_args(self):
        # One test to cover the remaining argument checks: empty contents,
        # an explicit etag and a content_type
contents = ""
with mocked_http_conn(200) as conn:
resp = direct_client.direct_put_object(
self.node, self.part, self.account, self.container, self.obj,
contents, etag="testing-etag", content_type='Text')
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual('PUT', conn.method)
self.assertEqual(self.obj_path, conn.path)
self.assertEqual(conn.req_headers['Content-Length'], '0')
self.assertEqual(conn.req_headers['Content-Type'], 'Text')
self.assertEqual(
md5(b'0\r\n\r\n', usedforsecurity=False).hexdigest(),
resp)
def test_direct_put_object_header_content_length(self):
contents = io.BytesIO(b'123456')
stub_headers = HeaderKeyDict({
'Content-Length': '6'})
with mocked_http_conn(200) as conn:
resp = direct_client.direct_put_object(
self.node, self.part, self.account, self.container, self.obj,
contents, headers=stub_headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual('PUT', conn.method)
self.assertEqual(conn.req_headers['Content-length'], '6')
self.assertEqual(
md5(b'123456', usedforsecurity=False).hexdigest(),
resp)
def test_retry(self):
headers = HeaderKeyDict({'key': 'value'})
with mocked_http_conn(200, headers) as conn:
attempts, resp = direct_client.retry(
direct_client.direct_head_object, self.node, self.part,
self.account, self.container, self.obj)
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertEqual(headers, resp)
self.assertEqual(attempts, 1)
def test_retry_client_exception(self):
logger = debug_logger('direct-client-test')
with mock.patch('swift.common.direct_client.sleep') as mock_sleep, \
mocked_http_conn(500) as conn:
with self.assertRaises(direct_client.ClientException) as err_ctx:
direct_client.retry(direct_client.direct_delete_object,
self.node, self.part,
self.account, self.container, self.obj,
retries=2, error_log=logger.error)
self.assertEqual('DELETE', conn.method)
self.assertEqual(err_ctx.exception.http_status, 500)
self.assertIn('DELETE', err_ctx.exception.args[0])
self.assertIn(self.obj_path,
err_ctx.exception.args[0])
self.assertIn(self.node['ip'], err_ctx.exception.args[0])
self.assertIn(self.node['port'], err_ctx.exception.args[0])
self.assertEqual(self.node['ip'], err_ctx.exception.http_host)
self.assertEqual(self.node['port'], err_ctx.exception.http_port)
self.assertEqual(self.node['device'], err_ctx.exception.http_device)
self.assertEqual(500, err_ctx.exception.http_status)
self.assertEqual([mock.call(1), mock.call(2)],
mock_sleep.call_args_list)
error_lines = logger.get_lines_for_level('error')
self.assertEqual(3, len(error_lines))
for line in error_lines:
self.assertIn('500 Internal Error', line)
def test_retry_http_exception(self):
logger = debug_logger('direct-client-test')
with mock.patch('swift.common.direct_client.sleep') as mock_sleep, \
mocked_http_conn(HTTPException('Kaboom!')) as conn:
with self.assertRaises(HTTPException) as err_ctx:
direct_client.retry(direct_client.direct_delete_object,
self.node, self.part,
self.account, self.container, self.obj,
retries=2, error_log=logger.error)
self.assertEqual('DELETE', conn.method)
self.assertEqual('Kaboom!', str(err_ctx.exception))
self.assertEqual([mock.call(1), mock.call(2)],
mock_sleep.call_args_list)
error_lines = logger.get_lines_for_level('error')
self.assertEqual(3, len(error_lines))
for line in error_lines:
self.assertIn('Kaboom!', line)
def test_direct_get_recon(self):
data = {
"/etc/swift/account.ring.gz": "de7316d2809205fa13ebfc747566260c",
"/etc/swift/container.ring.gz": "8e63c916fec81825cc40940eefe1d058",
"/etc/swift/object.ring.gz": "a77f51c14bbf7075bb7be0c27fd00dc4",
"/etc/swift/object-1.ring.gz": "f0222326f80ee5cb34b7546b18727923",
"/etc/swift/object-2.ring.gz": "2228dc8a7ff1cf2eb89b116653ac6191"}
body = json.dumps(data)
with mocked_http_conn(
200, {}, body,
mocked='swift.common.direct_client.http_connect_raw') as conn:
resp = direct_client.direct_get_recon(self.node, "ringmd5")
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, '/recon/ringmd5')
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(data, resp)
# Now check failure
with mocked_http_conn(
500,
mocked='swift.common.direct_client.http_connect_raw') as conn:
with self.assertRaises(ClientException) as raised:
resp = direct_client.direct_get_recon(self.node, "ringmd5")
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, '/recon/ringmd5')
self.assertEqual(raised.exception.http_status, 500)
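# Re-runs the whole TestDirectClient suite with the account, container and
# object names pre-encoded to UTF-8 bytes instead of unicode text.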
class TestUTF8DirectClient(TestDirectClient):
def setUp(self):
super(TestUTF8DirectClient, self).setUp()
self.account = self.account.encode('utf-8')
self.container = self.container.encode('utf-8')
self.obj = self.obj.encode('utf-8')
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/test_direct_client.py |
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import json
import unittest
from tempfile import mkdtemp
from shutil import rmtree
import os
import mock
from swift.common import ring, utils
from swift.common.utils import split_path
from swift.common.swob import Request, Response
from swift.common.middleware import list_endpoints
from swift.common.storage_policy import StoragePolicy, POLICIES
from test.unit import patch_policies
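# Minimal WSGI app wrapped by the list_endpoints filter in these tests; it
# answers every request with a 200 "FakeApp" body.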
class FakeApp(object):
def __call__(self, env, start_response):
return Response(body="FakeApp")(env, start_response)
def start_response(*args):
pass
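# Tests for the list_endpoints middleware. setUp writes small account,
# container and object rings (one object ring per storage policy) into a
# temp dir so endpoint lookups can be checked against known device layouts.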
@patch_policies([StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', True)])
class TestListEndpoints(unittest.TestCase):
def setUp(self):
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b''
self.testdir = mkdtemp()
accountgz = os.path.join(self.testdir, 'account.ring.gz')
containergz = os.path.join(self.testdir, 'container.ring.gz')
objectgz = os.path.join(self.testdir, 'object.ring.gz')
objectgz_1 = os.path.join(self.testdir, 'object-1.ring.gz')
self.policy_to_test = 0
self.expected_path = ('v1', 'a', 'c', 'o1')
# Let's make the rings slightly different so we can test
# that the correct ring is consulted (e.g. we don't consult
# the object ring to get nodes for a container)
intended_replica2part2dev_id_a = [
array.array('H', [3, 1, 3, 1]),
array.array('H', [0, 3, 1, 4]),
array.array('H', [1, 4, 0, 3])]
intended_replica2part2dev_id_c = [
array.array('H', [4, 3, 0, 1]),
array.array('H', [0, 1, 3, 4]),
array.array('H', [3, 4, 0, 1])]
intended_replica2part2dev_id_o = [
array.array('H', [0, 1, 0, 1]),
array.array('H', [0, 1, 0, 1]),
array.array('H', [3, 4, 3, 4])]
intended_replica2part2dev_id_o_1 = [
array.array('H', [1, 0, 1, 0]),
array.array('H', [1, 0, 1, 0]),
array.array('H', [4, 3, 4, 3])]
intended_devs = [{'id': 0, 'zone': 0, 'weight': 1.0,
'ip': '10.1.1.1', 'port': 6200,
'device': 'sda1'},
{'id': 1, 'zone': 0, 'weight': 1.0,
'ip': '10.1.1.1', 'port': 6200,
'device': 'sdb1'},
None,
{'id': 3, 'zone': 2, 'weight': 1.0,
'ip': '10.1.2.1', 'port': 6200,
'device': 'sdc1'},
{'id': 4, 'zone': 2, 'weight': 1.0,
'ip': '10.1.2.2', 'port': 6200,
'device': 'sdd1'}]
intended_part_shift = 30
ring.RingData(intended_replica2part2dev_id_a,
intended_devs, intended_part_shift).save(accountgz)
ring.RingData(intended_replica2part2dev_id_c,
intended_devs, intended_part_shift).save(containergz)
ring.RingData(intended_replica2part2dev_id_o,
intended_devs, intended_part_shift).save(objectgz)
ring.RingData(intended_replica2part2dev_id_o_1,
intended_devs, intended_part_shift).save(objectgz_1)
self.app = FakeApp()
self.list_endpoints = list_endpoints.filter_factory(
{'swift_dir': self.testdir})(self.app)
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
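    # Stub for get_container_info: returns minimal container info with
    # storage_policy forced to self.policy_to_test and asserts the parsed
    # version/account/container match self.expected_path.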
def FakeGetInfo(self, env, app, swift_source=None):
info = {'status': 0, 'sync_key': None, 'meta': {},
'cors': {'allow_origin': None, 'expose_headers': None,
'max_age': None},
'sysmeta': {}, 'read_acl': None,
'object_count': None, 'write_acl': None, 'versions': None,
'bytes': None}
info['storage_policy'] = self.policy_to_test
(version, account, container, unused) = \
split_path(env['PATH_INFO'], 3, 4, True)
self.assertEqual((version, account, container),
self.expected_path[:3])
return info
def test_parse_response_version(self):
expectations = {
'': 1.0, # legacy compat
'/1': 1.0,
'/v1': 1.0,
'/1.0': 1.0,
'/v1.0': 1.0,
'/2': 2.0,
'/v2': 2.0,
'/2.0': 2.0,
'/v2.0': 2.0,
}
accounts = (
'AUTH_test',
'test',
'verybadreseller_prefix'
'verybadaccount'
)
for expected_account in accounts:
for version, expected in expectations.items():
path = '/endpoints%s/%s/c/o' % (version, expected_account)
req = Request.blank(path)
version, account, container, obj = \
self.list_endpoints._parse_path(req)
try:
self.assertEqual(version, expected)
self.assertEqual(account, expected_account)
except AssertionError:
self.fail('Unexpected result from parse path %r: %r != %r'
% (path, (version, account),
(expected, expected_account)))
def test_parse_version_that_looks_like_account(self):
"""
Demonstrate the failure mode for versions that look like accounts,
if you can make _parse_path better and this is the *only* test that
fails you can delete it ;)
"""
bad_versions = (
'v_3',
'verybadreseller_prefix',
)
for bad_version in bad_versions:
req = Request.blank('/endpoints/%s/a/c/o' % bad_version)
version, account, container, obj = \
self.list_endpoints._parse_path(req)
self.assertEqual(version, 1.0)
self.assertEqual(account, bad_version)
self.assertEqual(container, 'a')
self.assertEqual(obj, 'c/o')
def test_parse_account_that_looks_like_version(self):
"""
        Demonstrate the failure mode for accounts that look like versions,
if you can make _parse_path better and this is the *only* test that
fails you can delete it ;)
"""
bad_accounts = (
'v3.0', 'verybaddaccountwithnoprefix',
)
for bad_account in bad_accounts:
req = Request.blank('/endpoints/%s/c/o' % bad_account)
self.assertRaises(ValueError,
self.list_endpoints._parse_path, req)
even_worse_accounts = {
'v1': 1.0,
'v2.0': 2.0,
}
for bad_account, guessed_version in even_worse_accounts.items():
req = Request.blank('/endpoints/%s/c/o' % bad_account)
version, account, container, obj = \
self.list_endpoints._parse_path(req)
self.assertEqual(version, guessed_version)
self.assertEqual(account, 'c')
self.assertEqual(container, 'o')
self.assertIsNone(obj)
def test_get_object_ring(self):
self.assertEqual(isinstance(self.list_endpoints.get_object_ring(0),
ring.Ring), True)
self.assertEqual(isinstance(self.list_endpoints.get_object_ring(1),
ring.Ring), True)
self.assertRaises(ValueError, self.list_endpoints.get_object_ring, 99)
def test_parse_path_no_version_specified(self):
req = Request.blank('/endpoints/a/c/o1')
version, account, container, obj = \
self.list_endpoints._parse_path(req)
self.assertEqual(account, 'a')
self.assertEqual(container, 'c')
self.assertEqual(obj, 'o1')
def test_parse_path_with_valid_version(self):
req = Request.blank('/endpoints/v2/a/c/o1')
version, account, container, obj = \
self.list_endpoints._parse_path(req)
self.assertEqual(version, 2.0)
self.assertEqual(account, 'a')
self.assertEqual(container, 'c')
self.assertEqual(obj, 'o1')
def test_parse_path_with_invalid_version(self):
req = Request.blank('/endpoints/v3/a/c/o1')
self.assertRaises(ValueError, self.list_endpoints._parse_path,
req)
def test_parse_path_with_no_account(self):
bad_paths = ('v1', 'v2', '')
for path in bad_paths:
req = Request.blank('/endpoints/%s' % path)
try:
self.list_endpoints._parse_path(req)
self.fail('Expected ValueError to be raised')
except ValueError as err:
self.assertEqual(str(err), 'No account specified')
def test_get_endpoint(self):
# Expected results for objects taken from test_ring
# Expected results for others computed by manually invoking
# ring.get_nodes().
resp = Request.blank('/endpoints/a/c/o1').get_response(
self.list_endpoints)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(json.loads(resp.body), [
"http://10.1.1.1:6200/sdb1/1/a/c/o1",
"http://10.1.2.2:6200/sdd1/1/a/c/o1"
])
# test policies with no version endpoint name
expected = [[
"http://10.1.1.1:6200/sdb1/1/a/c/o1",
"http://10.1.2.2:6200/sdd1/1/a/c/o1"], [
"http://10.1.1.1:6200/sda1/1/a/c/o1",
"http://10.1.2.1:6200/sdc1/1/a/c/o1"
]]
PATCHGI = 'swift.common.middleware.list_endpoints.get_container_info'
for pol in POLICIES:
self.policy_to_test = pol.idx
with mock.patch(PATCHGI, self.FakeGetInfo):
resp = Request.blank('/endpoints/a/c/o1').get_response(
self.list_endpoints)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(json.loads(resp.body),
expected[pol.idx])
# Here, 'o1/' is the object name.
resp = Request.blank('/endpoints/a/c/o1/').get_response(
self.list_endpoints)
self.assertEqual(resp.status_int, 200)
self.assertEqual(json.loads(resp.body), [
"http://10.1.1.1:6200/sdb1/3/a/c/o1/",
"http://10.1.2.2:6200/sdd1/3/a/c/o1/"
])
resp = Request.blank('/endpoints/a/c2').get_response(
self.list_endpoints)
self.assertEqual(resp.status_int, 200)
self.assertEqual(json.loads(resp.body), [
"http://10.1.1.1:6200/sda1/2/a/c2",
"http://10.1.2.1:6200/sdc1/2/a/c2"
])
resp = Request.blank('/endpoints/a1').get_response(
self.list_endpoints)
self.assertEqual(resp.status_int, 200)
self.assertEqual(json.loads(resp.body), [
"http://10.1.2.1:6200/sdc1/0/a1",
"http://10.1.1.1:6200/sda1/0/a1",
"http://10.1.1.1:6200/sdb1/0/a1"
])
resp = Request.blank('/endpoints/').get_response(
self.list_endpoints)
self.assertEqual(resp.status_int, 400)
resp = Request.blank('/endpoints/a/c 2').get_response(
self.list_endpoints)
self.assertEqual(resp.status_int, 200)
self.assertEqual(json.loads(resp.body), [
"http://10.1.1.1:6200/sdb1/3/a/c%202",
"http://10.1.2.2:6200/sdd1/3/a/c%202"
])
resp = Request.blank('/endpoints/a/c%202').get_response(
self.list_endpoints)
self.assertEqual(resp.status_int, 200)
self.assertEqual(json.loads(resp.body), [
"http://10.1.1.1:6200/sdb1/3/a/c%202",
"http://10.1.2.2:6200/sdd1/3/a/c%202"
])
resp = Request.blank('/endpoints/ac%20count/con%20tainer/ob%20ject') \
.get_response(self.list_endpoints)
self.assertEqual(resp.status_int, 200)
self.assertEqual(json.loads(resp.body), [
"http://10.1.1.1:6200/sdb1/3/ac%20count/con%20tainer/ob%20ject",
"http://10.1.2.2:6200/sdd1/3/ac%20count/con%20tainer/ob%20ject"
])
resp = Request.blank('/endpoints/a/c/o1', {'REQUEST_METHOD': 'POST'}) \
.get_response(self.list_endpoints)
self.assertEqual(resp.status_int, 405)
self.assertEqual(resp.status, '405 Method Not Allowed')
self.assertEqual(resp.headers['allow'], 'GET')
resp = Request.blank('/not-endpoints').get_response(
self.list_endpoints)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.status, '200 OK')
self.assertEqual(resp.body, b'FakeApp')
# test policies with custom endpoint name
for pol in POLICIES:
# test custom path with trailing slash
custom_path_le = list_endpoints.filter_factory({
'swift_dir': self.testdir,
'list_endpoints_path': '/some/another/path/'
})(self.app)
self.policy_to_test = pol.idx
with mock.patch(PATCHGI, self.FakeGetInfo):
resp = Request.blank('/some/another/path/a/c/o1') \
.get_response(custom_path_le)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(json.loads(resp.body),
expected[pol.idx])
# test custom path without trailing slash
custom_path_le = list_endpoints.filter_factory({
'swift_dir': self.testdir,
'list_endpoints_path': '/some/another/path'
})(self.app)
self.policy_to_test = pol.idx
with mock.patch(PATCHGI, self.FakeGetInfo):
resp = Request.blank('/some/another/path/a/c/o1') \
.get_response(custom_path_le)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(json.loads(resp.body),
expected[pol.idx])
def test_v1_response(self):
req = Request.blank('/endpoints/v1/a/c/o1')
resp = req.get_response(self.list_endpoints)
expected = ["http://10.1.1.1:6200/sdb1/1/a/c/o1",
"http://10.1.2.2:6200/sdd1/1/a/c/o1"]
self.assertEqual(json.loads(resp.body), expected)
def test_v2_obj_response(self):
req = Request.blank('/endpoints/v2/a/c/o1')
resp = req.get_response(self.list_endpoints)
expected = {
'endpoints': ["http://10.1.1.1:6200/sdb1/1/a/c/o1",
"http://10.1.2.2:6200/sdd1/1/a/c/o1"],
'headers': {'X-Backend-Storage-Policy-Index': "0"},
}
self.assertEqual(json.loads(resp.body), expected)
for policy in POLICIES:
patch_path = 'swift.common.middleware.list_endpoints' \
'.get_container_info'
mock_get_container_info = lambda *args, **kwargs: \
{'storage_policy': int(policy)}
with mock.patch(patch_path, mock_get_container_info):
resp = req.get_response(self.list_endpoints)
part, nodes = policy.object_ring.get_nodes('a', 'c', 'o1')
[node.update({'part': part}) for node in nodes]
path = 'http://%(ip)s:%(port)s/%(device)s/%(part)s/a/c/o1'
expected = {
'headers': {
'X-Backend-Storage-Policy-Index': str(int(policy))},
'endpoints': [path % node for node in nodes],
}
self.assertEqual(json.loads(resp.body), expected)
def test_v2_non_obj_response(self):
# account
req = Request.blank('/endpoints/v2/a')
resp = req.get_response(self.list_endpoints)
expected = {
'endpoints': ["http://10.1.2.1:6200/sdc1/0/a",
"http://10.1.1.1:6200/sda1/0/a",
"http://10.1.1.1:6200/sdb1/0/a"],
'headers': {},
}
# container
self.assertEqual(json.loads(resp.body), expected)
req = Request.blank('/endpoints/v2/a/c')
resp = req.get_response(self.list_endpoints)
expected = {
'endpoints': ["http://10.1.2.2:6200/sdd1/0/a/c",
"http://10.1.1.1:6200/sda1/0/a/c",
"http://10.1.2.1:6200/sdc1/0/a/c"],
'headers': {},
}
self.assertEqual(json.loads(resp.body), expected)
def test_version_account_response(self):
req = Request.blank('/endpoints/a')
resp = req.get_response(self.list_endpoints)
expected = ["http://10.1.2.1:6200/sdc1/0/a",
"http://10.1.1.1:6200/sda1/0/a",
"http://10.1.1.1:6200/sdb1/0/a"]
self.assertEqual(json.loads(resp.body), expected)
req = Request.blank('/endpoints/v1.0/a')
resp = req.get_response(self.list_endpoints)
self.assertEqual(json.loads(resp.body), expected)
req = Request.blank('/endpoints/v2/a')
resp = req.get_response(self.list_endpoints)
expected = {
'endpoints': ["http://10.1.2.1:6200/sdc1/0/a",
"http://10.1.1.1:6200/sda1/0/a",
"http://10.1.1.1:6200/sdb1/0/a"],
'headers': {},
}
self.assertEqual(json.loads(resp.body), expected)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/middleware/test_list_endpoints.py |
#!/usr/bin/env python
# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import io
import json
import mock
from six.moves.urllib.parse import parse_qs
from swift.common import swob
from swift.common.middleware import symlink, copy, versioned_writes, \
listing_formats
from swift.common.swob import Request
from swift.common.request_helpers import get_reserved_name
from swift.common.utils import MD5_OF_EMPTY_STRING
from swift.common.registry import get_swift_info
from test.unit.common.middleware.helpers import FakeSwift
from test.unit.common.middleware.test_versioned_writes import FakeCache
class TestSymlinkMiddlewareBase(unittest.TestCase):
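    # base class: wires a FakeSwift backend behind the symlink middleware
    # (symloop_max=2) and provides helpers to drive WSGI requests through
    # either the bare app or the wrapped middleware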
def setUp(self):
self.app = FakeSwift()
self.sym = symlink.filter_factory({
'symloop_max': '2',
})(self.app)
self.sym.logger = self.app.logger
def call_app(self, req, app=None, expect_exception=False):
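        # drive a single request through ``app`` (the bare FakeSwift app by
        # default), capturing status, headers and body; installs a stub
        # swift.authorize hook (unless one is already set) that records
        # authorized requests in self.authorized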
if app is None:
app = self.app
self.authorized = []
def authorize(req):
self.authorized.append(req)
if 'swift.authorize' not in req.environ:
req.environ['swift.authorize'] = authorize
status = [None]
headers = [None]
def start_response(s, h, ei=None):
status[0] = s
headers[0] = h
body_iter = app(req.environ, start_response)
body = b''
caught_exc = None
try:
for chunk in body_iter:
body += chunk
except Exception as exc:
if expect_exception:
caught_exc = exc
else:
raise
if expect_exception:
return status[0], headers[0], body, caught_exc
else:
return status[0], headers[0], body
def call_sym(self, req, **kwargs):
return self.call_app(req, app=self.sym, **kwargs)
class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
def test_symlink_info(self):
swift_info = get_swift_info()
self.assertEqual(swift_info['symlink'], {
'symloop_max': 2,
'static_links': True,
})
def test_symlink_simple_put(self):
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': 'c1/o'},
body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[0]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o' % MD5_OF_EMPTY_STRING)
self.assertEqual('application/symlink', hdrs.get('Content-Type'))
def test_symlink_simple_put_with_content_type(self):
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': 'c1/o',
'Content-Type': 'application/linkyfoo'},
body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[0]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o' % MD5_OF_EMPTY_STRING)
self.assertEqual('application/linkyfoo', hdrs.get('Content-Type'))
def test_symlink_simple_put_with_etag(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'Etag': 'tgt-etag', 'Content-Length': 42,
'Content-Type': 'application/foo'})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'tgt-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[1]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o; '
'symlink_target_etag=tgt-etag; '
'symlink_target_bytes=42' % MD5_OF_EMPTY_STRING)
self.assertEqual([
('HEAD', '/v1/a/c1/o'),
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
self.assertEqual('application/foo',
self.app._calls[-1].headers['Content-Type'])
def test_symlink_simple_put_with_quoted_etag(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'Etag': 'tgt-etag', 'Content-Length': 42,
'Content-Type': 'application/foo'})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': '"tgt-etag"',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[1]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o; '
'symlink_target_etag=tgt-etag; '
'symlink_target_bytes=42' % MD5_OF_EMPTY_STRING)
self.assertEqual([
('HEAD', '/v1/a/c1/o'),
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
self.assertEqual('application/foo',
self.app._calls[-1].headers['Content-Type'])
def test_symlink_simple_put_with_etag_target_missing_content_type(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'Etag': 'tgt-etag', 'Content-Length': 42})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'tgt-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[1]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o; '
'symlink_target_etag=tgt-etag; '
'symlink_target_bytes=42' % MD5_OF_EMPTY_STRING)
self.assertEqual([
('HEAD', '/v1/a/c1/o'),
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
# N.B. the ObjectController would call _update_content_type on PUT
# regardless, but you actually can't get a HEAD response without swob
# setting a Content-Type
self.assertEqual('text/html; charset=UTF-8',
self.app._calls[-1].headers['Content-Type'])
def test_symlink_simple_put_with_etag_explicit_content_type(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'Etag': 'tgt-etag', 'Content-Length': 42,
'Content-Type': 'application/foo'})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'tgt-etag',
'Content-Type': 'application/bar',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[1]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o; '
'symlink_target_etag=tgt-etag; '
'symlink_target_bytes=42' % MD5_OF_EMPTY_STRING)
self.assertEqual([
('HEAD', '/v1/a/c1/o'),
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
self.assertEqual('application/bar',
self.app._calls[-1].headers['Content-Type'])
def test_symlink_simple_put_with_unmatched_etag(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'Etag': 'tgt-etag', 'Content-Length': 42})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'not-tgt-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
self.assertIn(('Content-Location', '/v1/a/c1/o'), headers)
self.assertEqual(body, b"Object Etag 'tgt-etag' does not match "
b"X-Symlink-Target-Etag header 'not-tgt-etag'")
def test_symlink_simple_put_to_non_existing_object(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPNotFound, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'not-tgt-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
self.assertIn(('Content-Location', '/v1/a/c1/o'), headers)
self.assertIn(b'does not exist', body)
def test_symlink_simple_put_error(self):
self.app.register('HEAD', '/v1/a/c1/o',
swob.HTTPInternalServerError, {}, 'bad news')
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'not-tgt-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '500 Internal Error')
# this is a PUT response; so if we have a content-length...
self.assertGreater(int(dict(headers)['Content-Length']), 0)
# ... we better have a body!
self.assertIn(b'Internal Error', body)
def test_symlink_simple_put_to_non_existing_object_override(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPNotFound, {})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'some-tgt-etag',
# this header isn't normally sent with PUT
'X-Symlink-Target-Bytes': '13',
}, body='')
# this can be set in container_sync
req.environ['swift.symlink_override'] = True
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
def test_symlink_put_with_prevalidated_etag(self):
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT', headers={
'X-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Etag': 'tgt-etag',
'X-Object-Sysmeta-Symlink-Target-Bytes': '13',
'Content-Type': 'application/foo',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
self.assertEqual([
# N.B. no HEAD!
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
self.assertEqual('application/foo',
self.app._calls[-1].headers['Content-Type'])
method, path, hdrs = self.app.calls_with_headers[0]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o; '
'symlink_target_etag=tgt-etag; '
'symlink_target_bytes=13' % MD5_OF_EMPTY_STRING)
def test_symlink_put_with_prevalidated_etag_sysmeta_incomplete(self):
req = Request.blank('/v1/a/c/symlink', method='PUT', headers={
'X-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Etag': 'tgt-etag',
}, body='')
with self.assertRaises(KeyError) as cm:
self.call_sym(req)
self.assertEqual(cm.exception.args[0], swob.header_to_environ_key(
'X-Object-Sysmeta-Symlink-Target-Bytes'))
def test_symlink_chunked_put(self):
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': 'c1/o'},
environ={'wsgi.input': io.BytesIO(b'')})
self.assertIsNone(req.content_length) # sanity
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[0]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertNotIn('X-Object-Sysmeta-Symlink-Target-Account', hdrs)
val = hdrs.get('X-Object-Sysmeta-Container-Update-Override-Etag')
self.assertEqual(val, '%s; symlink_target=c1/o' % MD5_OF_EMPTY_STRING)
def test_symlink_chunked_put_error(self):
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': 'c1/o'},
environ={'wsgi.input':
io.BytesIO(b'this has a body')})
self.assertIsNone(req.content_length) # sanity
status, headers, body = self.call_sym(req)
self.assertEqual(status, '400 Bad Request')
def test_symlink_put_different_account(self):
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Account': 'a1'},
body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[0]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertEqual(hdrs.get('X-Object-Sysmeta-Symlink-Target-Account'),
'a1')
def test_symlink_put_leading_slash(self):
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': '/c1/o'},
body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '412 Precondition Failed')
self.assertEqual(body, b"X-Symlink-Target header must be of "
b"the form <container name>/<object name>")
def test_symlink_put_non_zero_length(self):
req = Request.blank('/v1/a/c/symlink', method='PUT', body='req_body',
headers={'X-Symlink-Target': 'c1/o'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '400 Bad Request')
self.assertEqual(body, b'Symlink requests require a zero byte body')
def test_symlink_put_bad_object_header(self):
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': 'o'},
body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, "412 Precondition Failed")
self.assertEqual(body, b"X-Symlink-Target header must be of "
b"the form <container name>/<object name>")
def test_symlink_put_bad_account_header(self):
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Account': 'a1/c1'},
body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, "412 Precondition Failed")
self.assertEqual(body, b"Account name cannot contain slashes")
def test_get_symlink(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Meta-Color': 'Red'})
req = Request.blank('/v1/a/c/symlink?symlink=get', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertIsInstance(headers, list)
self.assertIn(('X-Symlink-Target', 'c1/o'), headers)
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
self.assertIn(('X-Object-Meta-Color', 'Red'), headers)
self.assertEqual(body, b'')
        # HEAD with the same params finds the same registered GET
req = Request.blank('/v1/a/c/symlink?symlink=get', method='HEAD')
head_status, head_headers, head_body = self.call_sym(req)
self.assertEqual(head_status, '200 OK')
self.assertIn(('X-Symlink-Target', 'c1/o'), head_headers)
self.assertNotIn('X-Symlink-Target-Account', dict(head_headers))
self.assertIn(('X-Object-Meta-Color', 'Red'), head_headers)
self.assertEqual(head_body, b'')
def test_get_symlink_with_account(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
req = Request.blank('/v1/a/c/symlink?symlink=get', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertIn(('X-Symlink-Target', 'c1/o'), headers)
self.assertIn(('X-Symlink-Target-Account', 'a2'), headers)
def test_get_symlink_not_found(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPNotFound, {})
req = Request.blank('/v1/a/c/symlink', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '404 Not Found')
self.assertNotIn('Content-Location', dict(headers))
def test_get_target_object(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
self.app.register('GET', '/v1/a2/c1/o', swob.HTTPOk, {}, 'resp_body')
req_headers = {'X-Newest': 'True', 'X-Backend-Something': 'future'}
req = Request.blank('/v1/a/c/symlink', method='GET',
headers=req_headers)
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertEqual(body, b'resp_body')
self.assertNotIn('X-Symlink-Target', dict(headers))
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
self.assertIn(('Content-Location', '/v1/a2/c1/o'), headers)
calls = self.app.calls_with_headers
req_headers.update({
'Host': 'localhost:80',
'X-Backend-Ignore-Range-If-Metadata-Present':
'x-object-sysmeta-symlink-target',
'X-Backend-Storage-Policy-Index': '2',
})
self.assertEqual(req_headers, calls[0].headers)
req_headers['User-Agent'] = 'Swift'
self.assertEqual(req_headers, calls[1].headers)
self.assertFalse(calls[2:])
self.assertFalse(self.app.unread_requests)
def test_get_target_object_not_found(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-account': 'a2'})
self.app.register('GET', '/v1/a2/c1/o', swob.HTTPNotFound, {}, '')
req = Request.blank('/v1/a/c/symlink', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '404 Not Found')
self.assertEqual(body, b'')
self.assertNotIn('X-Symlink-Target', dict(headers))
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
self.assertIn(('Content-Location', '/v1/a2/c1/o'), headers)
self.assertFalse(self.app.unread_requests)
def test_get_target_object_range_not_satisfiable(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
self.app.register('GET', '/v1/a2/c1/o',
swob.HTTPRequestedRangeNotSatisfiable, {}, '')
req = Request.blank('/v1/a/c/symlink', method='GET',
headers={'Range': 'bytes=1-2'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '416 Requested Range Not Satisfiable')
self.assertEqual(
body, b'<html><h1>Requested Range Not Satisfiable</h1>'
b'<p>The Range requested is not available.</p></html>')
self.assertNotIn('X-Symlink-Target', dict(headers))
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
self.assertIn(('Content-Location', '/v1/a2/c1/o'), headers)
self.assertFalse(self.app.unread_requests)
def test_get_ec_symlink_range_unsatisfiable_can_redirect_to_target(self):
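        # a ranged GET of the zero-byte symlink object may itself return
        # 416, but the request is still redirected to the target, which
        # can satisfy the range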
self.app.register('GET', '/v1/a/c/symlink',
swob.HTTPRequestedRangeNotSatisfiable,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
self.app.register('GET', '/v1/a2/c1/o', swob.HTTPOk,
{'Content-Range': 'bytes 1-2/10'}, 'es')
req = Request.blank('/v1/a/c/symlink', method='GET',
headers={'Range': 'bytes=1-2'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertEqual(body, b'es')
self.assertNotIn('X-Symlink-Target', dict(headers))
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
self.assertIn(('Content-Location', '/v1/a2/c1/o'), headers)
self.assertIn(('Content-Range', 'bytes 1-2/10'), headers)
def test_get_non_symlink(self):
        # this is not a symlink object
self.app.register('GET', '/v1/a/c/obj', swob.HTTPOk, {}, 'resp_body')
req = Request.blank('/v1/a/c/obj', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertEqual(body, b'resp_body')
# Assert special headers for symlink are not in response
self.assertNotIn('X-Symlink-Target', dict(headers))
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
self.assertNotIn('Content-Location', dict(headers))
def test_get_static_link_mismatched_etag(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Etag': 'the-etag'})
        # apparently the target object was overwritten
self.app.register('GET', '/v1/a/c1/o', swob.HTTPOk,
{'ETag': 'not-the-etag'}, 'resp_body')
req = Request.blank('/v1/a/c/symlink', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
self.assertEqual(body, b"Object Etag 'not-the-etag' does not "
b"match X-Symlink-Target-Etag header 'the-etag'")
def test_get_static_link_to_symlink(self):
self.app.register('GET', '/v1/a/c/static_link', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/symlink',
'X-Object-Sysmeta-Symlink-Target-Etag': 'the-etag'})
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'ETag': 'the-etag',
'X-Object-Sysmeta-Symlink-Target': 'c1/o'})
self.app.register('GET', '/v1/a/c1/o', swob.HTTPOk,
{'ETag': 'not-the-etag'}, 'resp_body')
req = Request.blank('/v1/a/c/static_link', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
def test_get_static_link_to_symlink_fails(self):
self.app.register('GET', '/v1/a/c/static_link', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/symlink',
'X-Object-Sysmeta-Symlink-Target-Etag': 'the-etag'})
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'ETag': 'not-the-etag',
'X-Object-Sysmeta-Symlink-Target': 'c1/o'})
req = Request.blank('/v1/a/c/static_link', method='GET')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
self.assertEqual(body, b"X-Symlink-Target-Etag headers do not match")
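    # NOTE: this helper lacks the ``test_`` prefix, so unittest does not
    # collect it as a test case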
def put_static_link_to_symlink(self):
self.app.register('HEAD', '/v1/a/c/symlink', swob.HTTPOk,
{'ETag': 'symlink-etag',
'X-Object-Sysmeta-Symlink-Target': 'c/o',
'Content-Type': 'application/symlink'})
self.app.register('HEAD', '/v1/a/c/o', swob.HTTPOk,
{'ETag': 'tgt-etag',
'Content-Type': 'application/data'}, 'resp_body')
self.app.register('PUT', '/v1/a/c/static_link', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/static_link', method='PUT',
headers={
'X-Symlink-Target': 'c/symlink',
'X-Symlink-Target-Etag': 'symlink-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
self.assertEqual([], self.app.calls)
self.assertEqual('application/data',
self.app._calls[-1].headers['Content-Type'])
def test_head_symlink(self):
self.app.register('HEAD', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Meta-Color': 'Red'})
req = Request.blank('/v1/a/c/symlink?symlink=get', method='HEAD')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertIn(('X-Symlink-Target', 'c1/o'), headers)
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
self.assertIn(('X-Object-Meta-Color', 'Red'), headers)
def test_head_symlink_with_account(self):
self.app.register('HEAD', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2',
'X-Object-Meta-Color': 'Red'})
req = Request.blank('/v1/a/c/symlink?symlink=get', method='HEAD')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertIn(('X-Symlink-Target', 'c1/o'), headers)
self.assertIn(('X-Symlink-Target-Account', 'a2'), headers)
self.assertIn(('X-Object-Meta-Color', 'Red'), headers)
def test_head_target_object(self):
        # this test also validates that the symlink metadata is not
        # returned, but that the target object metadata is
self.app.register('HEAD', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2',
'X-Object-Meta-Color': 'Red'})
self.app.register('HEAD', '/v1/a2/c1/o', swob.HTTPOk,
{'X-Object-Meta-Color': 'Green'})
req_headers = {'X-Newest': 'True', 'X-Backend-Something': 'future'}
req = Request.blank('/v1/a/c/symlink', method='HEAD',
headers=req_headers)
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertNotIn('X-Symlink-Target', dict(headers))
self.assertNotIn('X-Symlink-Target-Account', dict(headers))
self.assertNotIn(('X-Object-Meta-Color', 'Red'), headers)
self.assertIn(('X-Object-Meta-Color', 'Green'), headers)
self.assertIn(('Content-Location', '/v1/a2/c1/o'), headers)
calls = self.app.calls_with_headers
req_headers.update({
'Host': 'localhost:80',
'X-Backend-Ignore-Range-If-Metadata-Present':
'x-object-sysmeta-symlink-target',
'X-Backend-Storage-Policy-Index': '2',
})
self.assertEqual(req_headers, calls[0].headers)
req_headers['User-Agent'] = 'Swift'
self.assertEqual(req_headers, calls[1].headers)
self.assertFalse(calls[2:])
def test_get_symlink_to_reserved_object(self):
cont = get_reserved_name('versioned')
obj = get_reserved_name('symlink', '9999998765.99999')
symlink_target = "%s/%s" % (cont, obj)
version_path = '/v1/a/%s' % symlink_target
self.app.register('GET', '/v1/a/versioned/symlink', swob.HTTPOk, {
symlink.TGT_OBJ_SYSMETA_SYMLINK_HDR: symlink_target,
symlink.ALLOW_RESERVED_NAMES: 'true',
'x-object-sysmeta-symlink-target-etag': MD5_OF_EMPTY_STRING,
'x-object-sysmeta-symlink-target-bytes': '0',
})
self.app.register('GET', version_path, swob.HTTPOk, {})
req = Request.blank('/v1/a/versioned/symlink', headers={
'Range': 'foo', 'If-Match': 'bar'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertIn(('Content-Location', version_path), headers)
self.assertEqual(len(self.authorized), 1)
self.assertNotIn('X-Backend-Allow-Reserved-Names',
self.app.calls_with_headers[0])
call_headers = self.app.calls_with_headers[1].headers
self.assertEqual('true', call_headers[
'X-Backend-Allow-Reserved-Names'])
self.assertEqual('foo', call_headers['Range'])
self.assertEqual('bar', call_headers['If-Match'])
def test_get_symlink_to_reserved_symlink(self):
cont = get_reserved_name('versioned')
obj = get_reserved_name('symlink', '9999998765.99999')
symlink_target = "%s/%s" % (cont, obj)
version_path = '/v1/a/%s' % symlink_target
self.app.register('GET', '/v1/a/versioned/symlink', swob.HTTPOk, {
symlink.TGT_OBJ_SYSMETA_SYMLINK_HDR: symlink_target,
symlink.ALLOW_RESERVED_NAMES: 'true',
'x-object-sysmeta-symlink-target-etag': MD5_OF_EMPTY_STRING,
'x-object-sysmeta-symlink-target-bytes': '0',
})
self.app.register('GET', version_path, swob.HTTPOk, {
symlink.TGT_OBJ_SYSMETA_SYMLINK_HDR: 'unversioned/obj',
'ETag': MD5_OF_EMPTY_STRING,
})
self.app.register('GET', '/v1/a/unversioned/obj', swob.HTTPOk, {
})
req = Request.blank('/v1/a/versioned/symlink')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertIn(('Content-Location', '/v1/a/unversioned/obj'), headers)
self.assertEqual(len(self.authorized), 2)
def test_symlink_too_deep(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/sym1'})
self.app.register('GET', '/v1/a/c/sym1', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/sym2'})
self.app.register('GET', '/v1/a/c/sym2', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/o'})
req = Request.blank('/v1/a/c/symlink', method='HEAD')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
self.assertEqual(body, b'')
req = Request.blank('/v1/a/c/symlink')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
self.assertEqual(body, b'Too many levels of symbolic links, '
b'maximum allowed is 2')
def test_symlink_change_symloopmax(self):
        # similar to test_symlink_too_deep, but with symloop_max raised to 3
self.sym = symlink.filter_factory({
'symloop_max': '3',
})(self.app)
self.sym.logger = self.app.logger
self.app.register('HEAD', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/sym1'})
self.app.register('HEAD', '/v1/a/c/sym1', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/sym2'})
self.app.register('HEAD', '/v1/a/c/sym2', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/o',
'X-Object-Meta-Color': 'Red'})
self.app.register('HEAD', '/v1/a/c/o', swob.HTTPOk,
{'X-Object-Meta-Color': 'Green'})
req = Request.blank('/v1/a/c/symlink', method='HEAD')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
# assert that the correct metadata was returned
self.assertNotIn(('X-Object-Meta-Color', 'Red'), headers)
self.assertIn(('X-Object-Meta-Color', 'Green'), headers)
def test_sym_to_sym_to_target(self):
        # this test also validates that the symlink metadata is not
        # returned, but that the target object metadata is
self.app.register('HEAD', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/sym1',
'X-Object-Meta-Color': 'Red'})
self.app.register('HEAD', '/v1/a/c/sym1', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Meta-Color': 'Yellow'})
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk,
{'X-Object-Meta-Color': 'Green'})
req = Request.blank('/v1/a/c/symlink', method='HEAD')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
self.assertNotIn(('X-Symlink-Target', 'c1/o'), headers)
self.assertNotIn(('X-Symlink-Target-Account', 'a2'), headers)
self.assertNotIn(('X-Object-Meta-Color', 'Red'), headers)
self.assertNotIn(('X-Object-Meta-Color', 'Yellow'), headers)
self.assertIn(('X-Object-Meta-Color', 'Green'), headers)
self.assertIn(('Content-Location', '/v1/a/c1/o'), headers)
def test_symlink_post(self):
self.app.register('POST', '/v1/a/c/symlink', swob.HTTPAccepted,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o'})
req = Request.blank('/v1/a/c/symlink', method='POST',
headers={'X-Object-Meta-Color': 'Red'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '307 Temporary Redirect')
self.assertEqual(
body,
b'The requested POST was applied to a symlink. POST '
b'directly to the target to apply requested metadata.')
method, path, hdrs = self.app.calls_with_headers[0]
val = hdrs.get('X-Object-Meta-Color')
self.assertEqual(val, 'Red')
def test_non_symlink_post(self):
self.app.register('POST', '/v1/a/c/o', swob.HTTPAccepted, {})
req = Request.blank('/v1/a/c/o', method='POST',
headers={'X-Object-Meta-Color': 'Red'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '202 Accepted')
def test_set_symlink_POST_fail(self):
# Setting a link with a POST request is not allowed
req = Request.blank('/v1/a/c/o', method='POST',
headers={'X-Symlink-Target': 'c1/regular_obj'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '400 Bad Request')
self.assertEqual(body, b"A PUT request is required to set a symlink "
b"target")
def test_symlink_post_but_fail_at_server(self):
self.app.register('POST', '/v1/a/c/o', swob.HTTPNotFound, {})
req = Request.blank('/v1/a/c/o', method='POST',
headers={'X-Object-Meta-Color': 'Red'})
status, headers, body = self.call_sym(req)
self.assertEqual(status, '404 Not Found')
def test_validate_and_prep_request_headers(self):
def do_test(headers):
req = Request.blank('/v1/a/c/o', method='PUT',
headers=headers)
symlink._validate_and_prep_request_headers(req)
# normal cases
do_test({'X-Symlink-Target': 'c1/o1'})
do_test({'X-Symlink-Target': 'c1/sub/o1'})
do_test({'X-Symlink-Target': 'c1%2Fo1'})
# specify account
do_test({'X-Symlink-Target': 'c1/o1',
'X-Symlink-Target-Account': 'another'})
# URL encoded is safe
do_test({'X-Symlink-Target': 'c1%2Fo1'})
# URL encoded + multibytes is also safe
target = u'\u30b0\u30e9\u30d6\u30eb/\u30a2\u30ba\u30ec\u30f3'
target = swob.bytes_to_wsgi(target.encode('utf8'))
do_test({'X-Symlink-Target': target})
do_test({'X-Symlink-Target': swob.wsgi_quote(target)})
target = swob.bytes_to_wsgi(u'\u30b0\u30e9\u30d6\u30eb'.encode('utf8'))
do_test(
{'X-Symlink-Target': 'cont/obj',
'X-Symlink-Target-Account': target})
do_test(
{'X-Symlink-Target': 'cont/obj',
'X-Symlink-Target-Account': swob.wsgi_quote(target)})
def test_validate_and_prep_request_headers_invalid_format(self):
def do_test(headers, status, err_msg):
req = Request.blank('/v1/a/c/o', method='PUT',
headers=headers)
with self.assertRaises(swob.HTTPException) as cm:
symlink._validate_and_prep_request_headers(req)
self.assertEqual(cm.exception.status, status)
self.assertEqual(cm.exception.body, err_msg)
do_test({'X-Symlink-Target': '/c1/o1'},
'412 Precondition Failed',
b'X-Symlink-Target header must be of the '
b'form <container name>/<object name>')
do_test({'X-Symlink-Target': 'c1o1'},
'412 Precondition Failed',
b'X-Symlink-Target header must be of the '
b'form <container name>/<object name>')
do_test({'X-Symlink-Target': 'c1/o1',
'X-Symlink-Target-Account': '/another'},
'412 Precondition Failed',
b'Account name cannot contain slashes')
do_test({'X-Symlink-Target': 'c1/o1',
'X-Symlink-Target-Account': 'an/other'},
'412 Precondition Failed',
b'Account name cannot contain slashes')
# url encoded case
do_test({'X-Symlink-Target': '%2Fc1%2Fo1'},
'412 Precondition Failed',
b'X-Symlink-Target header must be of the '
b'form <container name>/<object name>')
do_test({'X-Symlink-Target': 'c1/o1',
'X-Symlink-Target-Account': '%2Fanother'},
'412 Precondition Failed',
b'Account name cannot contain slashes')
do_test({'X-Symlink-Target': 'c1/o1',
'X-Symlink-Target-Account': 'an%2Fother'},
'412 Precondition Failed',
b'Account name cannot contain slashes')
# with multi-bytes
target = u'/\u30b0\u30e9\u30d6\u30eb/\u30a2\u30ba\u30ec\u30f3'
target = swob.bytes_to_wsgi(target.encode('utf8'))
do_test(
{'X-Symlink-Target': target},
'412 Precondition Failed',
b'X-Symlink-Target header must be of the '
b'form <container name>/<object name>')
do_test(
{'X-Symlink-Target': swob.wsgi_quote(target)},
'412 Precondition Failed',
b'X-Symlink-Target header must be of the '
b'form <container name>/<object name>')
account = u'\u30b0\u30e9\u30d6\u30eb/\u30a2\u30ba\u30ec\u30f3'
account = swob.bytes_to_wsgi(account.encode('utf8'))
do_test(
{'X-Symlink-Target': 'c/o',
'X-Symlink-Target-Account': account},
'412 Precondition Failed',
b'Account name cannot contain slashes')
do_test(
{'X-Symlink-Target': 'c/o',
'X-Symlink-Target-Account': swob.wsgi_quote(account)},
'412 Precondition Failed',
b'Account name cannot contain slashes')
def test_validate_and_prep_request_headers_points_to_itself(self):
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'X-Symlink-Target': 'c/o'})
with self.assertRaises(swob.HTTPException) as cm:
symlink._validate_and_prep_request_headers(req)
self.assertEqual(cm.exception.status, '400 Bad Request')
self.assertEqual(cm.exception.body, b'Symlink cannot target itself')
        # Even if the account is explicitly set to itself, it fails as well
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'X-Symlink-Target': 'c/o',
'X-Symlink-Target-Account': 'a'})
with self.assertRaises(swob.HTTPException) as cm:
symlink._validate_and_prep_request_headers(req)
self.assertEqual(cm.exception.status, '400 Bad Request')
self.assertEqual(cm.exception.body, b'Symlink cannot target itself')
        # sanity: pointing at another account is safe
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'X-Symlink-Target': 'c/o',
'X-Symlink-Target-Account': 'a1'})
symlink._validate_and_prep_request_headers(req)
def test_symloop_max_config(self):
self.app = FakeSwift()
# sanity
self.sym = symlink.filter_factory({
'symloop_max': '1',
})(self.app)
self.assertEqual(self.sym.symloop_max, 1)
        # values < 1 fall back to the default
self.sym = symlink.filter_factory({
'symloop_max': '-1',
})(self.app)
self.assertEqual(self.sym.symloop_max, symlink.DEFAULT_SYMLOOP_MAX)
class SymlinkCopyingTestCase(TestSymlinkMiddlewareBase):
# verify interaction of copy and symlink middlewares
def setUp(self):
self.app = FakeSwift()
conf = {'symloop_max': '2'}
self.sym = symlink.filter_factory(conf)(self.app)
self.sym.logger = self.app.logger
self.copy = copy.filter_factory({})(self.sym)
def call_copy(self, req, **kwargs):
return self.call_app(req, app=self.copy, **kwargs)
def test_copy_symlink_target(self):
req = Request.blank('/v1/a/src_cont/symlink', method='COPY',
headers={'Destination': 'tgt_cont/tgt_obj'})
self._test_copy_symlink_target(req)
req = Request.blank('/v1/a/tgt_cont/tgt_obj', method='PUT',
headers={'X-Copy-From': 'src_cont/symlink'})
self._test_copy_symlink_target(req)
def _test_copy_symlink_target(self, req):
self.app.register('GET', '/v1/a/src_cont/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
self.app.register('GET', '/v1/a2/c1/o', swob.HTTPOk, {}, 'resp_body')
self.app.register('PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated,
{}, 'resp_body')
status, headers, body = self.call_copy(req)
method, path, hdrs = self.app.calls_with_headers[0]
self.assertEqual(method, 'GET')
self.assertEqual(path, '/v1/a/src_cont/symlink')
self.assertEqual('/src_cont/symlink', hdrs.get('X-Copy-From'))
method, path, hdrs = self.app.calls_with_headers[1]
self.assertEqual(method, 'GET')
self.assertEqual(path, '/v1/a2/c1/o')
self.assertEqual('/src_cont/symlink', hdrs.get('X-Copy-From'))
method, path, hdrs = self.app.calls_with_headers[2]
self.assertEqual(method, 'PUT')
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
        # this is a raw object copy
self.assertEqual(val, None)
self.assertEqual(status, '201 Created')
def test_copy_symlink(self):
req = Request.blank(
'/v1/a/src_cont/symlink?symlink=get', method='COPY',
headers={'Destination': 'tgt_cont/tgt_obj'})
self._test_copy_symlink(req)
req = Request.blank(
'/v1/a/tgt_cont/tgt_obj?symlink=get', method='PUT',
headers={'X-Copy-From': 'src_cont/symlink'})
self._test_copy_symlink(req)
def _test_copy_symlink(self, req):
self.app.register('GET', '/v1/a/src_cont/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
self.app.register('PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated,
{'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Account': 'a2'})
status, headers, body = self.call_copy(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[0]
self.assertEqual(method, 'GET')
self.assertEqual(path, '/v1/a/src_cont/symlink?symlink=get')
self.assertEqual('/src_cont/symlink', hdrs.get('X-Copy-From'))
method, path, hdrs = self.app.calls_with_headers[1]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertEqual(
hdrs.get('X-Object-Sysmeta-Symlink-Target-Account'), 'a2')
def test_copy_symlink_new_target(self):
req = Request.blank(
'/v1/a/src_cont/symlink?symlink=get', method='COPY',
headers={'Destination': 'tgt_cont/tgt_obj',
'X-Symlink-Target': 'new_cont/new_obj',
'X-Symlink-Target-Account': 'new_acct'})
self._test_copy_symlink_new_target(req)
req = Request.blank(
'/v1/a/tgt_cont/tgt_obj?symlink=get', method='PUT',
headers={'X-Copy-From': 'src_cont/symlink',
'X-Symlink-Target': 'new_cont/new_obj',
'X-Symlink-Target-Account': 'new_acct'})
self._test_copy_symlink_new_target(req)
def _test_copy_symlink_new_target(self, req):
self.app.register('GET', '/v1/a/src_cont/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
self.app.register('PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated,
{'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Account': 'a2'})
status, headers, body = self.call_copy(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[0]
self.assertEqual(method, 'GET')
self.assertEqual(path, '/v1/a/src_cont/symlink?symlink=get')
self.assertEqual('/src_cont/symlink', hdrs.get('X-Copy-From'))
method, path, hdrs = self.app.calls_with_headers[1]
self.assertEqual(method, 'PUT')
self.assertEqual(path, '/v1/a/tgt_cont/tgt_obj?symlink=get')
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'new_cont/new_obj')
self.assertEqual(hdrs.get('X-Object-Sysmeta-Symlink-Target-Account'),
'new_acct')
def test_copy_symlink_with_slo_query(self):
req = Request.blank(
'/v1/a/src_cont/symlink?multipart-manifest=get&symlink=get',
method='COPY', headers={'Destination': 'tgt_cont/tgt_obj'})
self._test_copy_symlink_with_slo_query(req)
req = Request.blank(
'/v1/a/tgt_cont/tgt_obj?multipart-manifest=get&symlink=get',
method='PUT', headers={'X-Copy-From': 'src_cont/symlink'})
self._test_copy_symlink_with_slo_query(req)
def _test_copy_symlink_with_slo_query(self, req):
self.app.register('GET', '/v1/a/src_cont/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c1/o',
'X-Object-Sysmeta-Symlink-Target-Account': 'a2'})
self.app.register('PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated,
{'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Account': 'a2'})
status, headers, body = self.call_copy(req)
self.assertEqual(status, '201 Created')
method, path, hdrs = self.app.calls_with_headers[0]
self.assertEqual(method, 'GET')
path, query = path.split('?')
query_dict = parse_qs(query)
self.assertEqual(
path, '/v1/a/src_cont/symlink')
self.assertEqual(
query_dict,
{'multipart-manifest': ['get'], 'symlink': ['get'],
'format': ['raw']})
self.assertEqual('/src_cont/symlink', hdrs.get('X-Copy-From'))
method, path, hdrs = self.app.calls_with_headers[1]
val = hdrs.get('X-Object-Sysmeta-Symlink-Target')
self.assertEqual(val, 'c1/o')
self.assertEqual(
hdrs.get('X-Object-Sysmeta-Symlink-Target-Account'), 'a2')
def test_static_link_to_new_slo_manifest(self):
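        # a static link to an SLO manifest records the manifest's slo size
        # as the target bytes, and the container-update override etag
        # carries the slo_etag alongside the symlink_target details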
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'X-Static-Large-Object': 'True',
'Etag': 'manifest-etag',
'X-Object-Sysmeta-Slo-Size': '1048576',
'X-Object-Sysmeta-Slo-Etag': 'this-is-not-used',
'Content-Length': 42,
'Content-Type': 'application/big-data',
'X-Object-Sysmeta-Container-Update-Override-Etag':
'956859738870e5ca6aa17eeda58e4df0; '
'slo_etag=71e938d37c1d06dc634dd24660255a88',
})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'manifest-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
self.assertEqual([
('HEAD', '/v1/a/c1/o'),
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
method, path, hdrs = self.app.calls_with_headers[-1]
self.assertEqual('application/big-data', hdrs['Content-Type'])
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target'], 'c1/o')
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target-Etag'],
'manifest-etag')
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target-Bytes'],
'1048576')
self.assertEqual(
hdrs['X-Object-Sysmeta-Container-Update-Override-Etag'],
'd41d8cd98f00b204e9800998ecf8427e; '
'slo_etag=71e938d37c1d06dc634dd24660255a88; '
'symlink_target=c1/o; '
'symlink_target_etag=manifest-etag; '
'symlink_target_bytes=1048576')
def test_static_link_to_old_slo_manifest(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'X-Static-Large-Object': 'True',
'Etag': 'manifest-etag',
'X-Object-Sysmeta-Slo-Size': '1048576',
'X-Object-Sysmeta-Slo-Etag': '71e938d37c1d06dc634dd24660255a88',
'Content-Length': 42,
'Content-Type': 'application/big-data',
})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'manifest-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
self.assertEqual([
('HEAD', '/v1/a/c1/o'),
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
method, path, hdrs = self.app.calls_with_headers[-1]
self.assertEqual('application/big-data', hdrs['Content-Type'])
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target'], 'c1/o')
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target-Etag'],
'manifest-etag')
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target-Bytes'],
'1048576')
self.assertEqual(
hdrs['X-Object-Sysmeta-Container-Update-Override-Etag'],
'd41d8cd98f00b204e9800998ecf8427e; '
'slo_etag=71e938d37c1d06dc634dd24660255a88; '
'symlink_target=c1/o; '
'symlink_target_etag=manifest-etag; '
'symlink_target_bytes=1048576')
def test_static_link_to_really_old_slo_manifest(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'X-Static-Large-Object': 'True',
'Etag': 'manifest-etag',
'Content-Length': 42,
'Content-Type': 'application/big-data',
})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'manifest-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '201 Created')
self.assertEqual([
('HEAD', '/v1/a/c1/o'),
('PUT', '/v1/a/c/symlink'),
], self.app.calls)
method, path, hdrs = self.app.calls_with_headers[-1]
self.assertEqual('application/big-data', hdrs['Content-Type'])
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target'], 'c1/o')
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target-Etag'],
'manifest-etag')
        # symlink m/w is doing a HEAD, it's not going to read the
# manifest body and sum up the bytes - so we just use manifest size
self.assertEqual(hdrs['X-Object-Sysmeta-Symlink-Target-Bytes'],
'42')
        # no slo_etag, and target_bytes is the manifest size
self.assertEqual(
hdrs['X-Object-Sysmeta-Container-Update-Override-Etag'],
'd41d8cd98f00b204e9800998ecf8427e; '
'symlink_target=c1/o; '
'symlink_target_etag=manifest-etag; '
'symlink_target_bytes=42')
def test_static_link_to_slo_manifest_slo_etag(self):
self.app.register('HEAD', '/v1/a/c1/o', swob.HTTPOk, {
'Etag': 'manifest-etag',
'X-Object-Sysmeta-Slo-Etag': 'slo-etag',
'Content-Length': 42,
})
self.app.register('PUT', '/v1/a/c/symlink', swob.HTTPCreated, {})
# unquoted slo-etag doesn't match
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': 'slo-etag',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
# the quoted slo-etag is tolerated, but still doesn't match
req = Request.blank('/v1/a/c/symlink', method='PUT',
headers={
'X-Symlink-Target': 'c1/o',
'X-Symlink-Target-Etag': '"slo-etag"',
}, body='')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '409 Conflict')
class SymlinkVersioningTestCase(TestSymlinkMiddlewareBase):
# verify interaction of versioned_writes and symlink middlewares
def setUp(self):
self.app = FakeSwift()
conf = {'symloop_max': '2'}
self.sym = symlink.filter_factory(conf)(self.app)
self.sym.logger = self.app.logger
vw_conf = {'allow_versioned_writes': 'true'}
self.vw = versioned_writes.filter_factory(vw_conf)(self.sym)
def call_vw(self, req, **kwargs):
return self.call_app(req, app=self.vw, **kwargs)
def assertRequestEqual(self, req, other):
self.assertEqual(req.method, other.method)
self.assertEqual(req.path, other.path)
def test_new_symlink_version_success(self):
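        # overwriting an existing symlink archives the old symlink (with
        # its original target sysmeta) into the versions container before
        # the new target is written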
self.app.register(
'PUT', '/v1/a/c/symlink', swob.HTTPCreated,
{'X-Symlink-Target': 'new_cont/new_tgt',
'X-Symlink-Target-Account': 'a'}, None)
self.app.register(
'GET', '/v1/a/c/symlink', swob.HTTPOk,
{'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT',
'X-Object-Sysmeta-Symlink-Target': 'old_cont/old_tgt',
'X-Object-Sysmeta-Symlink-Target-Account': 'a'},
'')
self.app.register(
'PUT', '/v1/a/ver_cont/007symlink/0000000001.00000',
swob.HTTPCreated,
{'X-Symlink-Target': 'old_cont/old_tgt',
'X-Symlink-Target-Account': 'a'}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/symlink',
headers={'X-Symlink-Target': 'new_cont/new_tgt'},
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '0',
'swift.trans_id': 'fake_trans_id'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '201 Created')
        # authorized twice because versioned_writes now makes a check on
# PUT
self.assertEqual(len(self.authorized), 2)
self.assertRequestEqual(req, self.authorized[0])
self.assertEqual(['VW', 'VW', None], self.app.swift_sources)
self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids))
calls = self.app.calls_with_headers
method, path, req_headers = calls[2]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/symlink', path)
self.assertEqual(
'new_cont/new_tgt',
req_headers['X-Object-Sysmeta-Symlink-Target'])
def test_delete_latest_version_no_marker_success(self):
self.app.register(
'GET',
'/v1/a/ver_cont?prefix=003sym/&marker=&reverse=on',
swob.HTTPOk, {},
'[{"hash": "y", '
'"last_modified": "2014-11-21T14:23:02.206740", '
'"bytes": 0, '
'"name": "003sym/2", '
'"content_type": "text/plain"}, '
'{"hash": "x", '
'"last_modified": "2014-11-21T14:14:27.409100", '
'"bytes": 0, '
'"name": "003sym/1", '
'"content_type": "text/plain"}]')
self.app.register(
'GET', '/v1/a/ver_cont/003sym/2', swob.HTTPCreated,
{'content-length': '0',
'X-Object-Sysmeta-Symlink-Target': 'c/tgt'}, None)
self.app.register(
'PUT', '/v1/a/c/sym', swob.HTTPCreated,
{'X-Symlink-Target': 'c/tgt', 'X-Symlink-Target-Account': 'a'},
None)
self.app.register(
'DELETE', '/v1/a/ver_cont/003sym/2', swob.HTTPOk,
{}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/sym',
headers={'X-If-Delete-At': 1},
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0', 'swift.trans_id': 'fake_trans_id'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
self.assertEqual(4, self.app.call_count)
self.assertEqual(['VW', 'VW', 'VW', 'VW'], self.app.swift_sources)
self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids))
calls = self.app.calls_with_headers
method, path, req_headers = calls[2]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/sym', path)
self.assertEqual(
'c/tgt',
req_headers['X-Object-Sysmeta-Symlink-Target'])
class TestSymlinkContainerContext(TestSymlinkMiddlewareBase):
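    # verify the container-listing decoration: symlink details embedded in
    # a listing entry's 'hash' field (symlink_target=..., etc.) are split
    # out into symlink_path / symlink_etag / symlink_bytes keys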
def setUp(self):
super(TestSymlinkContainerContext, self).setUp()
self.context = symlink.SymlinkContainerContext(
self.sym.app, self.sym.logger)
def test_extract_symlink_path_json_simple_etag(self):
obj_dict = {"bytes": 6,
"last_modified": "1",
"hash": "etag",
"name": "obj",
"content_type": "application/octet-stream"}
obj_dict = self.context._extract_symlink_path_json(
obj_dict, 'v1', 'AUTH_a')
self.assertEqual(obj_dict['hash'], 'etag')
self.assertNotIn('symlink_path', obj_dict)
def test_extract_symlink_path_json_symlink_path(self):
obj_dict = {"bytes": 6,
"last_modified": "1",
"hash": "etag; symlink_target=c/o; something_else=foo; "
"symlink_target_etag=tgt_etag; symlink_target_bytes=8",
"name": "obj",
"content_type": "application/octet-stream"}
obj_dict = self.context._extract_symlink_path_json(
obj_dict, 'v1', 'AUTH_a')
self.assertEqual(obj_dict['hash'], 'etag; something_else=foo')
self.assertEqual(obj_dict['symlink_path'], '/v1/AUTH_a/c/o')
self.assertEqual(obj_dict['symlink_etag'], 'tgt_etag')
self.assertEqual(obj_dict['symlink_bytes'], 8)
def test_extract_symlink_path_json_symlink_path_and_account(self):
obj_dict = {
"bytes": 6,
"last_modified": "1",
"hash": "etag; symlink_target=c/o; symlink_target_account=AUTH_a2",
"name": "obj",
"content_type": "application/octet-stream"}
obj_dict = self.context._extract_symlink_path_json(
obj_dict, 'v1', 'AUTH_a')
self.assertEqual(obj_dict['hash'], 'etag')
self.assertEqual(obj_dict['symlink_path'], '/v1/AUTH_a2/c/o')
def test_extract_symlink_path_json_extra_key(self):
obj_dict = {"bytes": 6,
"last_modified": "1",
"hash": "etag; symlink_target=c/o; extra_key=value",
"name": "obj",
"content_type": "application/octet-stream"}
obj_dict = self.context._extract_symlink_path_json(
obj_dict, 'v1', 'AUTH_a')
self.assertEqual(obj_dict['hash'], 'etag; extra_key=value')
self.assertEqual(obj_dict['symlink_path'], '/v1/AUTH_a/c/o')
def test_get_container_simple(self):
self.app.register(
'GET',
'/v1/a/c',
swob.HTTPOk, {},
json.dumps(
[{"hash": "etag; symlink_target=c/o;",
"last_modified": "2014-11-21T14:23:02.206740",
"bytes": 0,
"name": "sym_obj",
"content_type": "text/plain"},
{"hash": "etag2",
"last_modified": "2014-11-21T14:14:27.409100",
"bytes": 32,
"name": "normal_obj",
"content_type": "text/plain"}]))
req = Request.blank(path='/v1/a/c')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
obj_list = json.loads(body)
self.assertIn('symlink_path', obj_list[0])
self.assertIn(obj_list[0]['symlink_path'], '/v1/a/c/o')
self.assertNotIn('symlink_path', obj_list[1])
def test_get_container_with_subdir(self):
self.app.register(
'GET',
'/v1/a/c?delimiter=/',
swob.HTTPOk, {},
json.dumps([{"subdir": "photos/"}]))
req = Request.blank(path='/v1/a/c?delimiter=/')
status, headers, body = self.call_sym(req)
self.assertEqual(status, '200 OK')
obj_list = json.loads(body)
self.assertEqual(len(obj_list), 1)
self.assertEqual(obj_list[0]['subdir'], 'photos/')
def test_get_container_error_cases(self):
        # No effect for error cases
for error in (swob.HTTPNotFound, swob.HTTPUnauthorized,
swob.HTTPServiceUnavailable,
swob.HTTPInternalServerError):
self.app.register('GET', '/v1/a/c', error, {}, '')
req = Request.blank(path='/v1/a/c')
status, headers, body = self.call_sym(req)
self.assertEqual(status, error().status)
def test_no_affect_for_account_request(self):
with mock.patch.object(self.sym, 'app') as mock_app:
mock_app.return_value = (b'ok',)
req = Request.blank(path='/v1/a')
status, headers, body = self.call_sym(req)
self.assertEqual(body, b'ok')
def test_get_container_simple_with_listing_format(self):
self.app.register(
'GET',
'/v1/a/c?format=json',
swob.HTTPOk, {},
json.dumps(
[{"hash": "etag; symlink_target=c/o;",
"last_modified": "2014-11-21T14:23:02.206740",
"bytes": 0,
"name": "sym_obj",
"content_type": "text/plain"},
{"hash": "etag2",
"last_modified": "2014-11-21T14:14:27.409100",
"bytes": 32,
"name": "normal_obj",
"content_type": "text/plain"}]))
self.lf = listing_formats.filter_factory({})(self.sym)
req = Request.blank(path='/v1/a/c?format=json')
status, headers, body = self.call_app(req, app=self.lf)
self.assertEqual(status, '200 OK')
obj_list = json.loads(body)
self.assertIn('symlink_path', obj_list[0])
self.assertIn(obj_list[0]['symlink_path'], '/v1/a/c/o')
self.assertNotIn('symlink_path', obj_list[1])
def test_get_container_simple_with_listing_format_xml(self):
self.app.register(
'GET',
'/v1/a/c?format=json',
swob.HTTPOk, {'Content-Type': 'application/json'},
json.dumps(
[{"hash": "etag; symlink_target=c/o;",
"last_modified": "2014-11-21T14:23:02.206740",
"bytes": 0,
"name": "sym_obj",
"content_type": "text/plain"},
{"hash": "etag2",
"last_modified": "2014-11-21T14:14:27.409100",
"bytes": 32,
"name": "normal_obj",
"content_type": "text/plain"}]))
self.lf = listing_formats.filter_factory({})(self.sym)
req = Request.blank(path='/v1/a/c?format=xml')
status, headers, body = self.call_app(req, app=self.lf)
self.assertEqual(status, '200 OK')
self.assertEqual(body.split(b'\n'), [
b'<?xml version="1.0" encoding="UTF-8"?>',
b'<container name="c"><object><name>sym_obj</name>'
b'<hash>etag</hash><bytes>0</bytes>'
b'<content_type>text/plain</content_type>'
b'<last_modified>2014-11-21T14:23:02.206740</last_modified>'
b'</object>'
b'<object><name>normal_obj</name><hash>etag2</hash>'
b'<bytes>32</bytes><content_type>text/plain</content_type>'
b'<last_modified>2014-11-21T14:14:27.409100</last_modified>'
b'</object></container>'])
| swift-master | test/unit/common/middleware/test_symlink.py |
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hmac
import hashlib
import unittest
from time import time
import six
if six.PY3:
from unittest import mock
else:
import mock
from io import BytesIO
from swift.common.swob import Request, Response, wsgi_quote
from swift.common.middleware import tempauth, formpost
from swift.common.middleware.tempurl import DEFAULT_ALLOWED_DIGESTS
from swift.common.utils import split_path
from swift.common import registry, digest as digest_utils
from swift.proxy.controllers.base import get_cache_key
from test.debug_logger import debug_logger
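# hmac_msg() reproduces the plaintext that the formpost middleware signs: the
# request path and the redirect, max_file_size, max_file_count and expires
# values, joined by newlines in that order.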
def hmac_msg(path, redirect, max_file_size, max_file_count, expires):
msg = '%s\n%s\n%s\n%s\n%s' % (
path, redirect, max_file_size, max_file_count, expires)
if six.PY3:
msg = msg.encode('utf-8')
return msg
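# FakeApp stands in for the rest of the proxy pipeline: it records every
# subrequest it receives and answers each one with the next canned
# (status, headers, body) tuple from status_headers_body_iter.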
class FakeApp(object):
def __init__(self, status_headers_body_iter=None,
check_no_query_string=True):
self.status_headers_body_iter = status_headers_body_iter
if not self.status_headers_body_iter:
self.status_headers_body_iter = iter([('404 Not Found', {
'x-test-header-one-a': 'value1',
'x-test-header-two-a': 'value2',
'x-test-header-two-b': 'value3'}, b'')])
self.requests = []
self.check_no_query_string = check_no_query_string
def __call__(self, env, start_response):
# use wsgi_quote to spot check that it really *is* a WSGI string
wsgi_quote(env['PATH_INFO'])
try:
if self.check_no_query_string and env.get('QUERY_STRING'):
raise Exception('Query string %s should have been discarded!' %
env['QUERY_STRING'])
body = b''
while True:
chunk = env['wsgi.input'].read()
if not chunk:
break
body += chunk
env['wsgi.input'] = BytesIO(body)
self.requests.append(Request.blank('', environ=env))
if env.get('swift.authorize_override') and \
env.get('REMOTE_USER') != '.wsgi.pre_authed':
raise Exception(
'Invalid REMOTE_USER %r with swift.authorize_override' % (
env.get('REMOTE_USER'),))
if 'swift.authorize' in env:
resp = env['swift.authorize'](self.requests[-1])
if resp:
return resp(env, start_response)
status, headers, body = next(self.status_headers_body_iter)
return Response(status=status, headers=headers,
body=body)(env, start_response)
except EOFError:
start_response('499 Client Disconnect',
[('Content-Type', 'text/plain')])
return [b'Client Disconnect\n']
class TestCappedFileLikeObject(unittest.TestCase):
def test_whole(self):
self.assertEqual(
formpost._CappedFileLikeObject(BytesIO(b'abc'), 10).read(),
b'abc')
def test_exceeded(self):
exc = None
try:
formpost._CappedFileLikeObject(BytesIO(b'abc'), 2).read()
except EOFError as err:
exc = err
self.assertEqual(str(exc), 'max_file_size exceeded')
def test_whole_readline(self):
fp = formpost._CappedFileLikeObject(BytesIO(b'abc\ndef'), 10)
self.assertEqual(fp.readline(), b'abc\n')
self.assertEqual(fp.readline(), b'def')
self.assertEqual(fp.readline(), b'')
def test_exceeded_readline(self):
fp = formpost._CappedFileLikeObject(BytesIO(b'abc\ndef'), 5)
self.assertEqual(fp.readline(), b'abc\n')
exc = None
try:
self.assertEqual(fp.readline(), b'def')
except EOFError as err:
exc = err
self.assertEqual(str(exc), 'max_file_size exceeded')
def test_read_sized(self):
fp = formpost._CappedFileLikeObject(BytesIO(b'abcdefg'), 10)
self.assertEqual(fp.read(2), b'ab')
self.assertEqual(fp.read(2), b'cd')
self.assertEqual(fp.read(2), b'ef')
self.assertEqual(fp.read(2), b'g')
self.assertEqual(fp.read(2), b'')
class TestFormPost(unittest.TestCase):
def setUp(self):
self.app = FakeApp()
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
self.logger = self.formpost.logger = debug_logger()
def _make_request(self, path, tempurl_keys=(), **kwargs):
req = Request.blank(path, **kwargs)
        # Fake out the caching layer so that get_account_info() finds its
        # data; _fake_cache_env() builds the account metadata, including any
        # tempurl keys.
_junk, account, _junk, _junk = split_path(path, 2, 4)
req.environ.setdefault('swift.infocache', {})
req.environ['swift.infocache'][get_cache_key(account)] = \
self._fake_cache_env(account, tempurl_keys)
return req
def _fake_cache_env(self, account, tempurl_keys=()):
# Fake out the caching layer so that get_account_info() finds its
# data. Include something that isn't tempurl keys to prove we skip it.
meta = {'user-job-title': 'Personal Trainer',
'user-real-name': 'Jim Shortz'}
for idx, key in enumerate(tempurl_keys):
meta_name = 'temp-url-key' + ("-%d" % (idx + 1) if idx else "")
if key:
meta[meta_name] = key
return {'status': 204,
'container_count': '0',
'total_object_count': '0',
'bytes': '0',
'meta': meta}
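    # Builds a signed multipart/form-data POST: returns the signature, a WSGI
    # environ resembling a browser upload, and the form body as a list of
    # lines (callers join them with CRLF and wrap them in BytesIO for
    # wsgi.input). With prefix=True the signature field is
    # "<algorithm>:<base64 digest>"; otherwise it is a bare hexdigest.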
def _make_sig_env_body(self, path, redirect, max_file_size, max_file_count,
expires, key, user_agent=True, algorithm='sha512',
prefix=True):
alg_name = algorithm
if six.PY2:
algorithm = getattr(hashlib, algorithm)
mac = hmac.new(
key,
hmac_msg(path, redirect, max_file_size, max_file_count, expires),
algorithm)
if prefix:
if six.PY2:
sig = alg_name + ':' + base64.b64encode(mac.digest())
else:
sig = alg_name + ':' + base64.b64encode(
mac.digest()).decode('ascii')
else:
sig = mac.hexdigest()
body = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'Content-Encoding: gzip',
'',
'Test\nFile\nTwo\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR--',
'',
]
if six.PY3:
body = [line.encode('utf-8') for line in body]
wsgi_errors = six.StringIO()
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=----WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate',
'HTTP_ACCEPT_LANGUAGE': 'en-us',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_HOST': 'ubuntu:8080',
'HTTP_ORIGIN': 'file://',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X '
'10_7_2) AppleWebKit/534.52.7 (KHTML, like Gecko) '
'Version/5.1.2 Safari/534.52.7',
'PATH_INFO': path,
'REMOTE_ADDR': '172.16.83.1',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'swift.infocache': {},
'wsgi.errors': wsgi_errors,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
if user_agent is False:
del env['HTTP_USER_AGENT']
return sig, env, body
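    # Most of the tests below drive the formpost filter directly as a WSGI
    # app, capturing whatever status/headers/exc_info it hands to
    # start_response.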
def test_passthrough(self):
for method in ('HEAD', 'GET', 'PUT', 'POST', 'DELETE'):
resp = self._make_request(
'/v1/a/c/o',
environ={'REQUEST_METHOD': method}).get_response(self.formpost)
self.assertEqual(resp.status_int, 401)
self.assertNotIn(b'FormPost', resp.body)
def test_auth_scheme(self):
        # FormPost rejects the expired form with a 401 and a WWW-Authenticate
        # challenge rather than passing the request through.
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() - 10), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
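        # Capture the arguments passed to start_response in one-element lists
        # so the test can inspect them after the app has been called.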
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '401 Unauthorized')
authenticate_v = None
for h, v in headers:
if h.lower() == 'www-authenticate':
authenticate_v = v
self.assertTrue(b'FormPost: Form Expired' in body)
self.assertEqual('Swift realm="unknown"', authenticate_v)
def test_safari(self):
key = b'abc'
path = '/v1/AUTH_test/container'
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig = hmac.new(
key,
hmac_msg(path, redirect, max_file_size, max_file_count, expires),
hashlib.sha512).hexdigest()
wsgi_input = '\r\n'.join([
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR--',
'',
])
if six.PY3:
wsgi_input = wsgi_input.encode('utf-8')
wsgi_input = BytesIO(wsgi_input)
wsgi_errors = six.StringIO()
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=----WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate',
'HTTP_ACCEPT_LANGUAGE': 'en-us',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_HOST': 'ubuntu:8080',
'HTTP_ORIGIN': 'file://',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X '
'10_7_2) AppleWebKit/534.52.7 (KHTML, like Gecko) '
'Version/5.1.2 Safari/534.52.7',
'PATH_INFO': path,
'REMOTE_ADDR': '172.16.83.1',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'swift.infocache': {
get_cache_key('AUTH_test'): self._fake_cache_env(
'AUTH_test', [key]),
get_cache_key('AUTH_test', 'container'): {
'meta': {}}},
'wsgi.errors': wsgi_errors,
'wsgi.input': wsgi_input,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEqual(location, 'http://brim.net?status=201&message=')
self.assertIsNone(exc_info)
self.assertTrue(b'http://brim.net?status=201&message=' in body)
self.assertEqual(len(self.app.requests), 2)
self.assertEqual(self.app.requests[0].body, b'Test File\nOne\n')
self.assertEqual(self.app.requests[1].body, b'Test\nFile\nTwo\n')
def test_firefox(self):
key = b'abc'
path = '/v1/AUTH_test/container'
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig = hmac.new(
key,
hmac_msg(path, redirect, max_file_size, max_file_count, expires),
hashlib.sha512).hexdigest()
wsgi_input = '\r\n'.join([
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'-----------------------------168072824752491622650073--',
''
])
if six.PY3:
wsgi_input = wsgi_input.encode('utf-8')
wsgi_input = BytesIO(wsgi_input)
wsgi_errors = six.StringIO()
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=---------------------------168072824752491622650073',
'HTTP_ACCEPT_CHARSET': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate',
'HTTP_ACCEPT_LANGUAGE': 'en-us,en;q=0.5',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_HOST': 'ubuntu:8080',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; '
'rv:8.0.1) Gecko/20100101 Firefox/8.0.1',
'PATH_INFO': '/v1/AUTH_test/container',
'REMOTE_ADDR': '172.16.83.1',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'swift.infocache': {
get_cache_key('AUTH_test'): self._fake_cache_env(
'AUTH_test', [key]),
get_cache_key('AUTH_test', 'container'): {
'meta': {}}},
'wsgi.errors': wsgi_errors,
'wsgi.input': wsgi_input,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEqual(location, 'http://brim.net?status=201&message=')
self.assertIsNone(exc_info)
self.assertTrue(b'http://brim.net?status=201&message=' in body)
self.assertEqual(len(self.app.requests), 2)
self.assertEqual(self.app.requests[0].body, b'Test File\nOne\n')
self.assertEqual(self.app.requests[1].body, b'Test\nFile\nTwo\n')
def test_chrome(self):
key = b'abc'
path = '/v1/AUTH_test/container'
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig = hmac.new(
key,
hmac_msg(path, redirect, max_file_size, max_file_count, expires),
hashlib.sha512).hexdigest()
wsgi_input = '\r\n'.join([
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA--',
''
])
if six.PY3:
wsgi_input = wsgi_input.encode('utf-8')
wsgi_input = BytesIO(wsgi_input)
wsgi_errors = six.StringIO()
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=----WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'HTTP_ACCEPT_CHARSET': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'HTTP_ACCEPT_ENCODING': 'gzip,deflate,sdch',
'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'HTTP_CACHE_CONTROL': 'max-age=0',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_HOST': 'ubuntu:8080',
'HTTP_ORIGIN': 'null',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X '
'10_7_2) AppleWebKit/535.7 (KHTML, like Gecko) '
'Chrome/16.0.912.63 Safari/535.7',
'PATH_INFO': '/v1/AUTH_test/container',
'REMOTE_ADDR': '172.16.83.1',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'swift.infocache': {
get_cache_key('AUTH_test'): self._fake_cache_env(
'AUTH_test', [key]),
get_cache_key('AUTH_test', 'container'): {
'meta': {}}},
'wsgi.errors': wsgi_errors,
'wsgi.input': wsgi_input,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEqual(location, 'http://brim.net?status=201&message=')
self.assertIsNone(exc_info)
self.assertTrue(b'http://brim.net?status=201&message=' in body)
self.assertEqual(len(self.app.requests), 2)
self.assertEqual(self.app.requests[0].body, b'Test File\nOne\n')
self.assertEqual(self.app.requests[1].body, b'Test\nFile\nTwo\n')
def test_explorer(self):
key = b'abc'
path = '/v1/AUTH_test/container'
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig = hmac.new(
key,
hmac_msg(path, redirect, max_file_size, max_file_count, expires),
hashlib.sha512).hexdigest()
wsgi_input = '\r\n'.join([
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="file1"; '
'filename="C:\\testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="file2"; '
'filename="C:\\testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'-----------------------------7db20d93017c--',
''
])
if six.PY3:
wsgi_input = wsgi_input.encode('utf-8')
wsgi_input = BytesIO(wsgi_input)
wsgi_errors = six.StringIO()
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=---------------------------7db20d93017c',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate',
'HTTP_ACCEPT_LANGUAGE': 'en-US',
'HTTP_ACCEPT': 'text/html, application/xhtml+xml, */*',
'HTTP_CACHE_CONTROL': 'no-cache',
'HTTP_CONNECTION': 'Keep-Alive',
'HTTP_HOST': '172.16.83.128:8080',
'HTTP_USER_AGENT': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT '
'6.1; WOW64; Trident/5.0)',
'PATH_INFO': '/v1/AUTH_test/container',
'REMOTE_ADDR': '172.16.83.129',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'swift.infocache': {
get_cache_key('AUTH_test'): self._fake_cache_env(
'AUTH_test', [key]),
get_cache_key('AUTH_test', 'container'): {
'meta': {}}},
'wsgi.errors': wsgi_errors,
'wsgi.input': wsgi_input,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEqual(location, 'http://brim.net?status=201&message=')
self.assertIsNone(exc_info)
self.assertTrue(b'http://brim.net?status=201&message=' in body)
self.assertEqual(len(self.app.requests), 2)
self.assertEqual(self.app.requests[0].body, b'Test File\nOne\n')
self.assertEqual(self.app.requests[1].body, b'Test\nFile\nTwo\n')
def test_curl_with_unicode(self):
key = b'abc'
path = u'/v1/AUTH_test/container/let_it_\N{SNOWFLAKE}/'
if six.PY2:
path = path.encode('utf-8')
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig = hmac.new(
key,
hmac_msg(path, redirect, max_file_size, max_file_count, expires),
hashlib.sha512).hexdigest()
wsgi_input = '\r\n'.join([
'--------------------------dea19ac8502ca805',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'--------------------------dea19ac8502ca805',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'--------------------------dea19ac8502ca805',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'--------------------------dea19ac8502ca805',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'--------------------------dea19ac8502ca805',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'--------------------------dea19ac8502ca805',
'Content-Disposition: form-data; name="file1"; '
'filename="\xe2\x98\x83.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'--------------------------dea19ac8502ca805',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'--------------------------dea19ac8502ca805',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'--------------------------dea19ac8502ca805--',
''
])
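        # On py3 the body was assembled as a native (WSGI-style) string, so
        # encode it with latin-1 to recover the raw bytes a client would send;
        # the snowman filename stays as the UTF-8 byte sequence \xe2\x98\x83.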
if not six.PY2:
wsgi_input = wsgi_input.encode('latin1')
wsgi_input = BytesIO(wsgi_input)
wsgi_errors = six.StringIO()
env = {
'CONTENT_LENGTH': str(len(wsgi_input.getvalue())),
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=------------------------dea19ac8502ca805',
'HTTP_ACCEPT': '*/*',
'HTTP_HOST': 'ubuntu:8080',
'HTTP_USER_AGENT': 'curl/7.58.0',
'PATH_INFO': '/v1/AUTH_test/container/let_it_\xE2\x9D\x84/',
'REMOTE_ADDR': '172.16.83.1',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'swift.infocache': {
get_cache_key('AUTH_test'): self._fake_cache_env(
'AUTH_test', [key]),
get_cache_key('AUTH_test', 'container'): {
'meta': {}}},
'wsgi.errors': wsgi_errors,
'wsgi.input': wsgi_input,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '303 See Other', body)
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEqual(location, 'http://brim.net?status=201&message=')
self.assertIsNone(exc_info)
self.assertTrue(b'http://brim.net?status=201&message=' in body)
self.assertEqual(len(self.app.requests), 2)
self.assertEqual(
self.app.requests[0].path,
'/v1/AUTH_test/container/let_it_%E2%9D%84/%E2%98%83.txt')
self.assertEqual(self.app.requests[0].body, b'Test File\nOne\n')
self.assertEqual(
self.app.requests[1].path,
'/v1/AUTH_test/container/let_it_%E2%9D%84/testfile2.txt')
self.assertEqual(self.app.requests[1].body, b'Test\nFile\nTwo\n')
def test_messed_up_start(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://brim.net', 5, 10,
int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'XX' + b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
def log_assert_int_status(env, response_status_int):
self.assertIsInstance(response_status_int, int)
self.formpost._log_request = log_assert_int_status
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '400 Bad Request')
self.assertIsNone(exc_info)
self.assertIn(b'FormPost: invalid starting boundary', body)
self.assertEqual(len(self.app.requests), 0)
def test_max_file_size_exceeded(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://brim.net', 5, 10,
int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '400 Bad Request')
self.assertIsNone(exc_info)
self.assertIn(b'FormPost: max_file_size exceeded', body)
self.assertEqual(len(self.app.requests), 0)
def test_max_file_count_exceeded(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://brim.net', 1024, 1,
int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEqual(
location,
'http://brim.net?status=400&message=max%20file%20count%20exceeded')
self.assertIsNone(exc_info)
self.assertTrue(
b'http://brim.net?status=400&message=max%20file%20count%20exceeded'
in body)
self.assertEqual(len(self.app.requests), 1)
self.assertEqual(self.app.requests[0].body, b'Test File\nOne\n')
def test_subrequest_does_not_pass_query(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
        env['QUERY_STRING'] = 'this=should&not=get&passed'
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(
iter([('201 Created', {}, b''),
('201 Created', {}, b'')]),
check_no_query_string=True)
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
        # Make sure we get a 201 Created, which means we made the final
        # subrequest
# (and FakeApp verifies that no QUERY_STRING got passed).
self.assertEqual(status, '201 Created')
self.assertIsNone(exc_info)
self.assertTrue(b'201 Created' in body)
self.assertEqual(len(self.app.requests), 2)
def test_subrequest_fails_redirect_404(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://brim.net', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('404 Not Found', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEqual(location, 'http://brim.net?status=404&message=')
self.assertIsNone(exc_info)
self.assertTrue(b'http://brim.net?status=404&message=' in body)
self.assertEqual(len(self.app.requests), 1)
def test_subrequest_fails_no_redirect_503(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('503 Server Error', {}, b'some bad news')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '503 Server Error')
self.assertTrue(b'bad news' in body)
self.assertEqual(len(self.app.requests), 1)
def test_truncated_attr_value(self):
key = b'abc'
redirect = 'a' * formpost.MAX_VALUE_LENGTH
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', redirect, max_file_size, max_file_count,
expires, key)
        # Tack on an extra char to redirect; it shouldn't matter since the
        # value gets truncated off on read.
redirect += 'b'
wsgi_input = '\r\n'.join([
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR--',
'',
])
if six.PY3:
wsgi_input = wsgi_input.encode('utf-8')
env['wsgi.input'] = BytesIO(wsgi_input)
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEqual(
location,
('a' * formpost.MAX_VALUE_LENGTH) + '?status=201&message=')
self.assertIsNone(exc_info)
self.assertIn(
(b'a' * formpost.MAX_VALUE_LENGTH) + b'?status=201&message=', body)
self.assertEqual(len(self.app.requests), 2)
self.assertEqual(self.app.requests[0].body, b'Test File\nOne\n')
self.assertEqual(self.app.requests[1].body, b'Test\nFile\nTwo\n')
def test_no_file_to_process(self):
key = b'abc'
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', redirect, max_file_size, max_file_count,
expires, key)
wsgi_input = '\r\n'.join([
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR--',
'',
])
if six.PY3:
wsgi_input = wsgi_input.encode('utf-8')
env['wsgi.input'] = BytesIO(wsgi_input)
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEqual(
location,
'http://brim.net?status=400&message=no%20files%20to%20process')
self.assertIsNone(exc_info)
self.assertTrue(
b'http://brim.net?status=400&message=no%20files%20to%20process'
in body)
self.assertEqual(len(self.app.requests), 0)
def test_formpost_without_useragent(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key, user_agent=False)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
def start_response(s, h, e=None):
pass
body = b''.join(self.formpost(env, start_response))
self.assertIn('User-Agent', self.app.requests[0].headers)
self.assertEqual(self.app.requests[0].headers['User-Agent'],
'FormPost')
def test_formpost_with_origin(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key, user_agent=False)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
env['HTTP_ORIGIN'] = 'http://localhost:5000'
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created',
{'Access-Control-Allow-Origin':
'http://localhost:5000'}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
headers = {}
def start_response(s, h, e=None):
for k, v in h:
headers[k] = v
body = b''.join(self.formpost(env, start_response))
self.assertEqual(headers['Access-Control-Allow-Origin'],
'http://localhost:5000')
def test_formpost_with_multiple_keys(self):
key = b'ernie'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
# Stick it in X-Account-Meta-Temp-URL-Key-2 and make sure we get it
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
body = b''.join(self.formpost(env, start_response))
self.assertEqual('303 See Other', status[0])
self.assertEqual(
'http://redirect?status=201&message=',
dict(headers[0]).get('Location'))
def test_formpost_with_multiple_container_keys(self):
first_key = b'ernie'
second_key = b'bert'
keys = [first_key, second_key]
meta = {}
for idx, key in enumerate(keys):
meta_name = 'temp-url-key' + ("-%d" % (idx + 1) if idx else "")
if key:
meta[meta_name] = key
for key in keys:
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test'))
# Stick it in X-Container-Meta-Temp-URL-Key-2 and ensure we get it
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': meta}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
body = b''.join(self.formpost(env, start_response))
self.assertEqual('303 See Other', status[0])
self.assertEqual(
'http://redirect?status=201&message=',
dict(headers[0]).get('Location'))
def test_redirect(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEqual(location, 'http://redirect?status=201&message=')
self.assertIsNone(exc_info)
self.assertTrue(location.encode('utf-8') in body)
self.assertEqual(len(self.app.requests), 2)
self.assertEqual(self.app.requests[0].body, b'Test File\nOne\n')
self.assertEqual(self.app.requests[1].body, b'Test\nFile\nTwo\n')
def test_redirect_with_query(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect?one=two', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEqual(location,
'http://redirect?one=two&status=201&message=')
self.assertIsNone(exc_info)
self.assertTrue(location.encode('utf-8') in body)
self.assertEqual(len(self.app.requests), 2)
self.assertEqual(self.app.requests[0].body, b'Test File\nOne\n')
self.assertEqual(self.app.requests[1].body, b'Test\nFile\nTwo\n')
def test_no_redirect(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '201 Created')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertIsNone(location)
self.assertIsNone(exc_info)
self.assertTrue(b'201 Created' in body)
self.assertEqual(len(self.app.requests), 2)
self.assertEqual(self.app.requests[0].body, b'Test File\nOne\n')
self.assertEqual(self.app.requests[1].body, b'Test\nFile\nTwo\n')
def test_prefixed_and_not_prefixed_sigs_good(self):
def do_test(digest, prefixed):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10,
int(time() + 86400), key, algorithm=digest, prefix=prefixed)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.auth.app = app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '201 Created')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertIsNone(location)
self.assertIsNone(exc_info)
self.assertTrue(b'201 Created' in body)
self.assertEqual(len(app.requests), 2)
self.assertEqual(app.requests[0].body, b'Test File\nOne\n')
self.assertEqual(app.requests[1].body, b'Test\nFile\nTwo\n')
for digest in ('sha1', 'sha256', 'sha512'):
do_test(digest, True)
do_test(digest, False)
# NB: one increment per *upload*, not client request
self.assertEqual(self.logger.statsd_client.get_increment_counts(), {
'formpost.digests.sha1': 4,
'formpost.digests.sha256': 4,
'formpost.digests.sha512': 4,
})
def test_prefixed_and_not_prefixed_sigs_unsupported(self):
def do_test(digest, prefixed):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10,
int(time() + 86400), key, algorithm=digest, prefix=prefixed)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
def start_response(s, h, e=None):
status[0] = s
body = b''.join(self.formpost(env, start_response))
status = status[0]
self.assertEqual(status, '401 Unauthorized')
for digest in ('md5', 'sha224'):
do_test(digest, True)
do_test(digest, False)
def test_no_redirect_expired(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() - 10), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertIsNone(location)
self.assertIsNone(exc_info)
self.assertTrue(b'FormPost: Form Expired' in body)
def test_no_redirect_invalid_sig(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
# Change key to invalidate sig
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key + b' is bogus now']))
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertIsNone(location)
self.assertIsNone(exc_info)
self.assertTrue(b'FormPost: Invalid Signature' in body)
def test_no_redirect_with_error(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'XX' + b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '400 Bad Request')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertIsNone(location)
self.assertIsNone(exc_info)
self.assertTrue(b'FormPost: invalid starting boundary' in body)
def test_redirect_allowed_deprecated_and_unsupported_digests(self):
logger = debug_logger()
def do_test(digest):
logger.clear()
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key, algorithm=digest)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
with mock.patch('swift.common.middleware.formpost.get_logger',
return_value=logger):
self.formpost = formpost.filter_factory(
{
'allowed_digests': DEFAULT_ALLOWED_DIGESTS})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
return body, status[0], headers[0], exc_info[0]
for algorithm in ('sha1', 'sha256', 'sha512'):
body, status, headers, exc_info = do_test(algorithm)
self.assertEqual(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEqual(location, 'http://redirect?status=201&message=')
self.assertIsNone(exc_info)
self.assertTrue(location.encode('utf-8') in body)
self.assertEqual(len(self.app.requests), 2)
self.assertEqual(self.app.requests[0].body, b'Test File\nOne\n')
self.assertEqual(self.app.requests[1].body, b'Test\nFile\nTwo\n')
if algorithm in digest_utils.DEPRECATED_DIGESTS:
self.assertIn(
'The following digest algorithms are configured but '
'deprecated: %s. Support will be removed in a '
'future release.' % algorithm,
logger.get_lines_for_level('warning'))
# unsupported
_body, status, _headers, _exc_info = do_test("md5")
self.assertEqual(status, '401 Unauthorized')
def test_no_v1(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v2/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertIsNone(location)
self.assertIsNone(exc_info)
self.assertTrue(b'FormPost: Invalid Signature' in body)
def test_empty_v1(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'//AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertIsNone(location)
self.assertIsNone(exc_info)
self.assertTrue(b'FormPost: Invalid Signature' in body)
def test_empty_account(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1//container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertIsNone(location)
self.assertIsNone(exc_info)
self.assertTrue(b'FormPost: Invalid Signature' in body)
def test_wrong_account(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_tst/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
self.app = FakeApp(iter([
('200 Ok', {'x-account-meta-temp-url-key': 'def'}, b''),
('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertIsNone(location)
self.assertIsNone(exc_info)
self.assertTrue(b'FormPost: Invalid Signature' in body)
def test_no_container(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertIsNone(location)
self.assertIsNone(exc_info)
self.assertTrue(b'FormPost: Invalid Signature' in body)
def test_completely_non_int_expires(self):
key = b'abc'
expires = int(time() + 86400)
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, expires, key)
for i, v in enumerate(body):
if v.decode('utf-8') == str(expires):
body[i] = b'badvalue'
break
env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '400 Bad Request')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertIsNone(location)
self.assertIsNone(exc_info)
self.assertTrue(b'FormPost: expired not an integer' in body)
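    # The next group of tests covers the optional x_delete_at and
    # x_delete_after form fields, which are forwarded to the backend as
    # X-Delete-At / X-Delete-After headers on each uploaded file.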
def test_x_delete_at(self):
delete_at = int(time() + 100)
x_delete_body_part = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="x_delete_at"',
'',
str(delete_at),
]
if six.PY3:
x_delete_body_part = [line.encode('utf-8')
for line in x_delete_body_part]
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
wsgi_input = b'\r\n'.join(x_delete_body_part + body)
env['wsgi.input'] = BytesIO(wsgi_input)
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '201 Created')
self.assertTrue(b'201 Created' in body)
self.assertEqual(len(self.app.requests), 2)
self.assertIn("X-Delete-At", self.app.requests[0].headers)
self.assertIn("X-Delete-At", self.app.requests[1].headers)
self.assertEqual(delete_at,
self.app.requests[0].headers["X-Delete-At"])
self.assertEqual(delete_at,
self.app.requests[1].headers["X-Delete-At"])
def test_x_delete_at_not_int(self):
delete_at = "2014-07-16"
x_delete_body_part = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="x_delete_at"',
'',
str(delete_at),
]
if six.PY3:
x_delete_body_part = [line.encode('utf-8')
for line in x_delete_body_part]
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
wsgi_input = b'\r\n'.join(x_delete_body_part + body)
env['wsgi.input'] = BytesIO(wsgi_input)
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '400 Bad Request')
self.assertTrue(b'FormPost: x_delete_at not an integer' in body)
def test_x_delete_after(self):
delete_after = 100
x_delete_body_part = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="x_delete_after"',
'',
str(delete_after),
]
if six.PY3:
x_delete_body_part = [line.encode('utf-8')
for line in x_delete_body_part]
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
wsgi_input = b'\r\n'.join(x_delete_body_part + body)
env['wsgi.input'] = BytesIO(wsgi_input)
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '201 Created')
self.assertTrue(b'201 Created' in body)
self.assertEqual(len(self.app.requests), 2)
self.assertIn("X-Delete-After", self.app.requests[0].headers)
self.assertIn("X-Delete-After", self.app.requests[1].headers)
self.assertEqual(delete_after,
self.app.requests[0].headers["X-Delete-After"])
self.assertEqual(delete_after,
self.app.requests[1].headers["X-Delete-After"])
def test_x_delete_after_not_int(self):
delete_after = "2 days"
x_delete_body_part = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="x_delete_after"',
'',
str(delete_after),
]
if six.PY3:
x_delete_body_part = [line.encode('utf-8')
for line in x_delete_body_part]
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
wsgi_input = b'\r\n'.join(x_delete_body_part + body)
env['wsgi.input'] = BytesIO(wsgi_input)
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '400 Bad Request')
self.assertTrue(b'FormPost: x_delete_after not an integer' in body)
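    # Content type/encoding handling: bare "content-type" and
    # "content-encoding" form fields apply to every uploaded file, while a
    # Content-Type header on an individual file part only affects that part.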
def test_global_content_type_encoding(self):
body_part = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="content-encoding"',
'',
'gzip',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="content-type"',
'',
'text/html',
]
if six.PY3:
body_part = [line.encode('utf-8') for line in body_part]
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
wsgi_input = b'\r\n'.join(body_part + body)
env['wsgi.input'] = BytesIO(wsgi_input)
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '201 Created')
self.assertTrue(b'201 Created' in body)
self.assertEqual(len(self.app.requests), 2)
self.assertIn("Content-Type", self.app.requests[0].headers)
self.assertIn("Content-Type", self.app.requests[1].headers)
self.assertIn("Content-Encoding", self.app.requests[0].headers)
self.assertIn("Content-Encoding", self.app.requests[1].headers)
self.assertEqual("text/html",
self.app.requests[0].headers["Content-Type"])
self.assertEqual("text/html",
self.app.requests[1].headers["Content-Type"])
self.assertEqual("gzip",
self.app.requests[0].headers["Content-Encoding"])
self.assertEqual("gzip",
self.app.requests[1].headers["Content-Encoding"])
def test_single_content_type_encoding(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
wsgi_input = b'\r\n'.join(body)
env['wsgi.input'] = BytesIO(wsgi_input)
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b''),
('201 Created', {}, b'')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEqual(status, '201 Created')
self.assertTrue(b'201 Created' in body)
self.assertEqual(len(self.app.requests), 2)
self.assertEqual(self.app.requests[1].body, b'Test\nFile\nTwo\n')
self.assertIn("Content-Type", self.app.requests[0].headers)
self.assertIn("Content-Type", self.app.requests[1].headers)
self.assertEqual("text/plain",
self.app.requests[0].headers["Content-Type"])
self.assertEqual("text/plain",
self.app.requests[1].headers["Content-Type"])
self.assertFalse("Content-Encoding" in self.app.requests[0].headers)
self.assertIn("Content-Encoding", self.app.requests[1].headers)
self.assertEqual("gzip",
self.app.requests[1].headers["Content-Encoding"])
def test_multiple_content_type_encoding(self):
body_part = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file4"; '
'filename="testfile4.txt"',
'Content-Type: application/json',
'',
'{"four": 4}\n',
]
if six.PY3:
body_part = [line.encode('utf-8') for line in body_part]
key = b'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
# splice in another file with a different content type
before_closing_boundary = len(body) - 2
body[before_closing_boundary:before_closing_boundary] = body_part
wsgi_input = b'\r\n'.join(body)
env['wsgi.input'] = BytesIO(wsgi_input)
env['swift.infocache'][get_cache_key('AUTH_test')] = (
self._fake_cache_env('AUTH_test', [key]))
env['swift.infocache'][get_cache_key(
'AUTH_test', 'container')] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, b'')] * 3))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
body = b''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
self.assertEqual(status, '201 Created')
self.assertTrue(b'201 Created' in body)
self.assertEqual(len(self.app.requests), 3)
self.assertEqual(self.app.requests[0].body, b'Test File\nOne\n')
self.assertEqual(self.app.requests[1].body, b'Test\nFile\nTwo\n')
self.assertEqual(self.app.requests[2].body, b'{"four": 4}\n')
self.assertIn("Content-Type", self.app.requests[0].headers)
self.assertIn("Content-Type", self.app.requests[1].headers)
self.assertIn("Content-Type", self.app.requests[2].headers)
self.assertEqual("text/plain",
self.app.requests[0].headers["Content-Type"])
self.assertEqual("text/plain",
self.app.requests[1].headers["Content-Type"])
self.assertEqual("application/json",
self.app.requests[2].headers["Content-Type"])
self.assertFalse("Content-Encoding" in self.app.requests[0].headers)
self.assertIn("Content-Encoding", self.app.requests[1].headers)
self.assertEqual("gzip",
self.app.requests[1].headers["Content-Encoding"])
self.assertFalse("Content-Encoding" in self.app.requests[2].headers)
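# Verify that the formpost filter publishes its allowed and deprecated digest
# algorithms through the /info registry.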
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
registry._swift_info = {}
registry._swift_admin_info = {}
def test_registered_defaults(self):
formpost.filter_factory({})
swift_info = registry.get_swift_info()
self.assertIn('formpost', swift_info)
info = swift_info['formpost']
self.assertIn('allowed_digests', info)
self.assertIn('deprecated_digests', info)
self.assertEqual(info['allowed_digests'], ['sha1', 'sha256', 'sha512'])
self.assertEqual(info['deprecated_digests'], ['sha1'])
def test_non_default_methods(self):
logger = debug_logger()
with mock.patch('swift.common.middleware.formpost.get_logger',
return_value=logger):
formpost.filter_factory({
'allowed_digests': 'sha1 sha512 md5 not-a-valid-digest',
})
swift_info = registry.get_swift_info()
self.assertIn('formpost', swift_info)
info = swift_info['formpost']
self.assertIn('allowed_digests', info)
self.assertIn('deprecated_digests', info)
self.assertEqual(info['allowed_digests'], ['sha1', 'sha512'])
self.assertEqual(info['deprecated_digests'], ['sha1'])
warning_lines = logger.get_lines_for_level('warning')
self.assertIn(
'The following digest algorithms are configured '
'but not supported:',
warning_lines[0])
self.assertIn('not-a-valid-digest', warning_lines[0])
self.assertIn('md5', warning_lines[0])
def test_no_deprecated_digests(self):
formpost.filter_factory({'allowed_digests': 'sha256 sha512'})
swift_info = registry.get_swift_info()
self.assertIn('formpost', swift_info)
info = swift_info['formpost']
self.assertIn('allowed_digests', info)
self.assertNotIn('deprecated_digests', info)
self.assertEqual(info['allowed_digests'], ['sha256', 'sha512'])
def test_bad_config(self):
with self.assertRaises(ValueError):
formpost.filter_factory({
'allowed_digests': 'md4',
})
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/middleware/test_formpost.py |
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Unit tests for the name_check filter
Created on February 29, 2012
@author: eamonn-otoole
'''
import numbers
import unittest
from swift.common.swob import Request, Response
from swift.common.middleware import name_check
from swift.common import registry
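# Configuration values passed to the name_check filter via self.conf in
# setUp() below.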
MAX_LENGTH = 255
FORBIDDEN_CHARS = '\'\"<>`'
FORBIDDEN_REGEXP = r"/\./|/\.\./|/\.$|/\.\.$"
class FakeApp(object):
def __call__(self, env, start_response):
return Response(body="OK")(env, start_response)
class TestNameCheckMiddleware(unittest.TestCase):
def setUp(self):
self.conf = {'maximum_length': MAX_LENGTH, 'forbidden_chars':
FORBIDDEN_CHARS, 'forbidden_regexp': FORBIDDEN_REGEXP}
self.test_check = name_check.filter_factory(self.conf)(FakeApp())
def test_valid_length_and_character(self):
path = '/V1.0/' + 'c' * (MAX_LENGTH - 6)
resp = Request.blank(path, environ={'REQUEST_METHOD': 'PUT'}
).get_response(self.test_check)
self.assertEqual(resp.body, b'OK')
def test_invalid_character(self):
for c in self.conf['forbidden_chars']:
path = '/V1.0/1234' + c + '5'
resp = Request.blank(
path, environ={'REQUEST_METHOD': 'PUT'}).get_response(
self.test_check)
self.assertEqual(
resp.body,
("Object/Container/Account name contains forbidden chars "
"from %s" % self.conf['forbidden_chars']).encode('utf8'))
self.assertEqual(resp.status_int, 400)
def test_maximum_length_from_config(self):
# test invalid length
app = name_check.filter_factory({'maximum_length': "500"})(FakeApp())
path = '/V1.0/a/c/' + 'o' * (500 - 9)
resp = Request.blank(path, environ={'REQUEST_METHOD': 'PUT'}
).get_response(app)
self.assertEqual(
resp.body,
("Object/Container/Account name longer than the allowed "
"maximum 500").encode('utf-8'))
self.assertEqual(resp.status_int, 400)
# test valid length
path = '/V1.0/a/c/' + 'o' * (500 - 10)
resp = Request.blank(path, environ={'REQUEST_METHOD': 'PUT'}
).get_response(app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, b'OK')
def test_invalid_length(self):
path = '/V1.0/' + 'c' * (MAX_LENGTH - 5)
resp = Request.blank(path, environ={'REQUEST_METHOD': 'PUT'}
).get_response(self.test_check)
self.assertEqual(
resp.body,
("Object/Container/Account name longer than the allowed maximum %s"
% self.conf['maximum_length']).encode('utf-8'))
self.assertEqual(resp.status_int, 400)
def test_invalid_regexp(self):
for s in [r'/.', r'/..', r'/./foo', r'/../foo']:
path = '/V1.0/' + s
resp = Request.blank(
path, environ={'REQUEST_METHOD': 'PUT'}).get_response(
self.test_check)
self.assertEqual(
resp.body,
("Object/Container/Account name contains a forbidden "
"substring from regular expression %s"
% self.conf['forbidden_regexp']).encode('utf-8'))
self.assertEqual(resp.status_int, 400)
def test_valid_regexp(self):
for s in [r'/...', r'/.\.', r'/foo']:
path = '/V1.0/' + s
resp = Request.blank(
path, environ={'REQUEST_METHOD': 'PUT'}).get_response(
self.test_check)
self.assertEqual(resp.body, b'OK')
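# Verify that the name_check filter registers its effective limits in the
# /info registry.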
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
registry._swift_info = {}
registry._swift_admin_info = {}
def test_registered_defaults(self):
name_check.filter_factory({})(FakeApp())
swift_info = registry.get_swift_info()
self.assertTrue('name_check' in swift_info)
self.assertTrue(isinstance(
swift_info['name_check'].get('maximum_length'),
numbers.Integral))
self.assertTrue(isinstance(
swift_info['name_check'].get('forbidden_chars'),
str))
self.assertTrue(isinstance(
swift_info['name_check'].get('forbidden_regexp'),
str))
def test_registered_configured_options(self):
conf = {'maximum_length': 512,
'forbidden_chars': '\'\"`',
'forbidden_regexp': r"/\./|/\.\./|/\.$"}
name_check.filter_factory(conf)(FakeApp())
swift_info = registry.get_swift_info()
self.assertTrue('name_check' in swift_info)
self.assertEqual(swift_info['name_check'].get('maximum_length'), 512)
self.assertEqual(set(swift_info['name_check'].get('forbidden_chars')),
set('\'\"`'))
self.assertEqual(swift_info['name_check'].get('forbidden_regexp'),
r"/\./|/\.\./|/\.$")
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/middleware/test_name_check.py |
# Copyright (c) 2022 NVIDIA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
from collections import defaultdict
import mock
from swift.common.middleware import backend_ratelimit
from swift.common.middleware.backend_ratelimit import \
BackendRateLimitMiddleware
from swift.common.swob import Request, HTTPOk
from test.debug_logger import debug_logger
from test.unit.common.middleware.helpers import FakeSwift
class FakeApp(object):
def __init__(self):
self.calls = []
def __call__(self, env, start_response):
        start_response('200 OK', [])
        return [b'']
class TestBackendRatelimitMiddleware(unittest.TestCase):
def setUp(self):
super(TestBackendRatelimitMiddleware, self).setUp()
self.swift = FakeSwift()
def test_init(self):
conf = {}
factory = backend_ratelimit.filter_factory(conf)
rl = factory(self.swift)
self.assertEqual(0.0, rl.requests_per_device_per_second)
self.assertEqual(1.0, rl.requests_per_device_rate_buffer)
conf = {'requests_per_device_per_second': 1.3,
'requests_per_device_rate_buffer': 2.4}
factory = backend_ratelimit.filter_factory(conf)
rl = factory(self.swift)
self.assertEqual(1.3, rl.requests_per_device_per_second)
self.assertEqual(2.4, rl.requests_per_device_rate_buffer)
conf = {'requests_per_device_per_second': -1}
factory = backend_ratelimit.filter_factory(conf)
with self.assertRaises(ValueError) as cm:
factory(self.swift)
self.assertEqual(
'Value must be a non-negative float number, not "-1.0".',
str(cm.exception))
conf = {'requests_per_device_rate_buffer': -1}
factory = backend_ratelimit.filter_factory(conf)
        with self.assertRaises(ValueError) as cm:
factory(self.swift)
self.assertEqual(
'Value must be a non-negative float number, not "-1.0".',
str(cm.exception))
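    # Helper: drive the middleware with the given per-device rate and rate
    # buffer and return a dict mapping each device to its count of 200
    # responses; rate-limited requests are expected to receive a 529.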
def _do_test_ratelimit(self, method, req_per_sec, rate_buffer):
        # send 20 requests per device; the mocked clock advances 0.01s after
        # each round of three devices
start = time.time()
fake_time = [start]
def mock_time():
return fake_time[0]
app = FakeSwift()
logger = debug_logger()
# apply a ratelimit
conf = {'requests_per_device_per_second': req_per_sec,
'requests_per_device_rate_buffer': rate_buffer}
rl = BackendRateLimitMiddleware(app, conf, logger)
success = defaultdict(int)
ratelimited = 0
with mock.patch('swift.common.utils.time.time', mock_time):
for i in range(20):
for dev in ['sda1', 'sda2', 'sda3']:
req = Request.blank('/%s/99/a/c/o' % dev,
environ={'REQUEST_METHOD': method})
app.register(method, req.path, HTTPOk, {})
resp = req.get_response(rl)
if resp.status_int == 200:
success[dev] += 1
else:
self.assertEqual(529, resp.status_int)
self.assertTrue(resp.status.startswith(
'529 Too Many Backend Requests'))
ratelimited += 1
fake_time[0] += 0.01
self.assertEqual(
ratelimited,
logger.statsd_client.get_increment_counts().get(
'backend.ratelimit', 0))
return success
def test_ratelimited(self):
def do_test_ratelimit(method):
# no rate-limiting
success_per_dev = self._do_test_ratelimit(method, 0, 0)
self.assertEqual([20] * 3, list(success_per_dev.values()))
# rate-limited
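            # 20 requests per device are spaced 0.01s apart (~0.19s total),
            # so with no rate buffer roughly 1 + int(req_per_sec * 0.19)
            # requests per device should succeed.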
success_per_dev = self._do_test_ratelimit(method, 1, 0)
self.assertEqual([1] * 3, list(success_per_dev.values()))
success_per_dev = self._do_test_ratelimit(method, 10, 0)
self.assertEqual([2] * 3, list(success_per_dev.values()))
success_per_dev = self._do_test_ratelimit(method, 101, 0)
self.assertEqual([20] * 3, list(success_per_dev.values()))
            # startup burst of 1 second's allowance plus the current allowance...
success_per_dev = self._do_test_ratelimit(method, 1, 1)
self.assertEqual([2] * 3, list(success_per_dev.values()))
success_per_dev = self._do_test_ratelimit(method, 10, 1)
self.assertEqual([12] * 3, list(success_per_dev.values()))
do_test_ratelimit('GET')
do_test_ratelimit('HEAD')
do_test_ratelimit('PUT')
do_test_ratelimit('POST')
do_test_ratelimit('DELETE')
do_test_ratelimit('UPDATE')
do_test_ratelimit('REPLICATE')
def test_not_ratelimited(self):
def do_test_no_ratelimit(method):
# verify no rate-limiting
success_per_dev = self._do_test_ratelimit(method, 1, 0)
self.assertEqual([20] * 3, list(success_per_dev.values()))
do_test_no_ratelimit('OPTIONS')
do_test_no_ratelimit('SSYNC')
def test_unhandled_request(self):
app = FakeSwift()
logger = debug_logger()
conf = {'requests_per_device_per_second': 1,
'requests_per_device_rate_buffer': 1}
def do_test(path):
rl = BackendRateLimitMiddleware(app, conf, logger)
req = Request.blank(path)
app.register('GET', req.path, HTTPOk, {})
for i in range(10):
resp = req.get_response(rl)
self.assertEqual(200, resp.status_int)
self.assertEqual(
0, logger.statsd_client.get_increment_counts().get(
'backend.ratelimit', 0))
do_test('/recon/version')
do_test('/healthcheck')
do_test('/v1/a/c/o')
| swift-master | test/unit/common/middleware/test_backend_ratelimit.py |
# Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from swift.common.swob import Request, HTTPOk, HTTPNoContent
from swift.common.middleware import listing_formats
from swift.common.request_helpers import get_reserved_name
from test.debug_logger import debug_logger
from test.unit.common.middleware.helpers import FakeSwift
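# The listing_formats middleware always fetches JSON listings from the
# backend (note the "?format=json" on every recorded call) and re-serializes
# them for the client as plain text, JSON or XML.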
class TestListingFormats(unittest.TestCase):
def setUp(self):
self.fake_swift = FakeSwift()
self.logger = debug_logger('test-listing')
self.app = listing_formats.ListingFilter(self.fake_swift, {},
logger=self.logger)
self.fake_account_listing = json.dumps([
{'name': 'bar', 'bytes': 0, 'count': 0,
'last_modified': '1970-01-01T00:00:00.000000'},
{'subdir': 'foo_'},
]).encode('ascii')
self.fake_container_listing = json.dumps([
{'name': 'bar', 'hash': 'etag', 'bytes': 0,
'content_type': 'text/plain',
'last_modified': '1970-01-01T00:00:00.000000'},
{'subdir': 'foo/'},
]).encode('ascii')
self.fake_account_listing_with_reserved = json.dumps([
{'name': 'bar', 'bytes': 0, 'count': 0,
'last_modified': '1970-01-01T00:00:00.000000'},
{'name': get_reserved_name('bar', 'versions'), 'bytes': 0,
'count': 0, 'last_modified': '1970-01-01T00:00:00.000000'},
{'subdir': 'foo_'},
{'subdir': get_reserved_name('foo_')},
]).encode('ascii')
self.fake_container_listing_with_reserved = json.dumps([
{'name': 'bar', 'hash': 'etag', 'bytes': 0,
'content_type': 'text/plain',
'last_modified': '1970-01-01T00:00:00.000000'},
{'name': get_reserved_name('bar', 'extra data'), 'hash': 'etag',
'bytes': 0, 'content_type': 'text/plain',
'last_modified': '1970-01-01T00:00:00.000000'},
{'subdir': 'foo/'},
{'subdir': get_reserved_name('foo/')},
]).encode('ascii')
def test_valid_account(self):
self.fake_swift.register('GET', '/v1/a', HTTPOk, {
'Content-Length': str(len(self.fake_account_listing)),
'Content-Type': 'application/json'}, self.fake_account_listing)
req = Request.blank('/v1/a')
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'bar\nfoo_\n')
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a?format=json'))
req = Request.blank('/v1/a?format=txt')
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'bar\nfoo_\n')
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a?format=json'))
req = Request.blank('/v1/a?format=json')
resp = req.get_response(self.app)
self.assertEqual(json.loads(resp.body),
json.loads(self.fake_account_listing))
self.assertEqual(resp.headers['Content-Type'],
'application/json; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a?format=json'))
req = Request.blank('/v1/a?format=xml')
resp = req.get_response(self.app)
self.assertEqual(resp.body.split(b'\n'), [
b'<?xml version="1.0" encoding="UTF-8"?>',
b'<account name="a">',
b'<container><name>bar</name><count>0</count><bytes>0</bytes>'
b'<last_modified>1970-01-01T00:00:00.000000</last_modified>'
b'</container>',
b'<subdir name="foo_" />',
b'</account>',
])
self.assertEqual(resp.headers['Content-Type'],
'application/xml; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a?format=json'))
def test_valid_content_type_on_txt_head(self):
self.fake_swift.register('HEAD', '/v1/a', HTTPNoContent, {
'Content-Length': str(len(self.fake_account_listing)),
'Content-Type': 'application/json'}, self.fake_account_listing)
req = Request.blank('/v1/a', method='HEAD')
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'')
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertIn('Vary', resp.headers)
# Even though the client didn't send an Accept header, the response
# could change *if a subsequent request does*, so include Vary: Accept
self.assertEqual(resp.headers['Vary'], 'Accept')
self.assertEqual(self.fake_swift.calls[-1], (
'HEAD', '/v1/a?format=json'))
def test_valid_content_type_on_xml_head(self):
self.fake_swift.register('HEAD', '/v1/a', HTTPNoContent, {
'Content-Length': str(len(self.fake_account_listing)),
'Content-Type': 'application/json'}, self.fake_account_listing)
req = Request.blank('/v1/a?format=xml', method='HEAD')
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'')
self.assertEqual(resp.headers['Content-Type'],
'application/xml; charset=utf-8')
# query param overrides header, so it won't vary
self.assertNotIn('Vary', resp.headers)
self.assertEqual(self.fake_swift.calls[-1], (
'HEAD', '/v1/a?format=json'))
def test_update_vary_if_present(self):
self.fake_swift.register('HEAD', '/v1/a', HTTPNoContent, {
'Content-Length': str(len(self.fake_account_listing)),
'Content-Type': 'application/json',
'Vary': 'Origin'}, self.fake_account_listing)
req = Request.blank('/v1/a', method='HEAD')
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'')
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(resp.headers['Vary'], 'Origin, Accept')
self.assertEqual(self.fake_swift.calls[-1], (
'HEAD', '/v1/a?format=json'))
def test_update_vary_does_not_duplicate(self):
self.fake_swift.register('HEAD', '/v1/a', HTTPNoContent, {
'Content-Length': str(len(self.fake_account_listing)),
'Content-Type': 'application/json',
'Vary': 'Accept'}, self.fake_account_listing)
req = Request.blank('/v1/a', method='HEAD')
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'')
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(resp.headers['Vary'], 'Accept')
self.assertEqual(self.fake_swift.calls[-1], (
'HEAD', '/v1/a?format=json'))
def test_valid_account_with_reserved(self):
body_len = len(self.fake_account_listing_with_reserved)
self.fake_swift.register(
'GET', '/v1/a\xe2\x98\x83', HTTPOk, {
'Content-Length': str(body_len),
'Content-Type': 'application/json',
}, self.fake_account_listing_with_reserved)
req = Request.blank('/v1/a\xe2\x98\x83')
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'bar\nfoo_\n')
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a\xe2\x98\x83?format=json'))
self.assertEqual(self.logger.get_lines_for_level('warning'), [
"Account listing for a%E2%98%83 had reserved byte in name: "
"'\\x00bar\\x00versions'",
"Account listing for a%E2%98%83 had reserved byte in subdir: "
"'\\x00foo_'",
])
req = Request.blank('/v1/a\xe2\x98\x83', headers={
'X-Backend-Allow-Reserved-Names': 'true'})
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'bar\n%s\nfoo_\n%s\n' % (
get_reserved_name('bar', 'versions').encode('ascii'),
get_reserved_name('foo_').encode('ascii'),
))
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a\xe2\x98\x83?format=json'))
req = Request.blank('/v1/a\xe2\x98\x83?format=txt')
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'bar\nfoo_\n')
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a\xe2\x98\x83?format=json'))
req = Request.blank('/v1/a\xe2\x98\x83?format=txt', headers={
'X-Backend-Allow-Reserved-Names': 'true'})
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'bar\n%s\nfoo_\n%s\n' % (
get_reserved_name('bar', 'versions').encode('ascii'),
get_reserved_name('foo_').encode('ascii'),
))
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a\xe2\x98\x83?format=json'))
req = Request.blank('/v1/a\xe2\x98\x83?format=json')
resp = req.get_response(self.app)
self.assertEqual(json.loads(resp.body),
json.loads(self.fake_account_listing))
self.assertEqual(resp.headers['Content-Type'],
'application/json; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a\xe2\x98\x83?format=json'))
req = Request.blank('/v1/a\xe2\x98\x83?format=json', headers={
'X-Backend-Allow-Reserved-Names': 'true'})
resp = req.get_response(self.app)
self.assertEqual(json.loads(resp.body),
json.loads(self.fake_account_listing_with_reserved))
self.assertEqual(resp.headers['Content-Type'],
'application/json; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a\xe2\x98\x83?format=json'))
req = Request.blank('/v1/a\xe2\x98\x83?format=xml')
resp = req.get_response(self.app)
self.assertEqual(resp.body.split(b'\n'), [
b'<?xml version="1.0" encoding="UTF-8"?>',
b'<account name="a\xe2\x98\x83">',
b'<container><name>bar</name><count>0</count><bytes>0</bytes>'
b'<last_modified>1970-01-01T00:00:00.000000</last_modified>'
b'</container>',
b'<subdir name="foo_" />',
b'</account>',
])
self.assertEqual(resp.headers['Content-Type'],
'application/xml; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a\xe2\x98\x83?format=json'))
req = Request.blank('/v1/a\xe2\x98\x83?format=xml', headers={
'X-Backend-Allow-Reserved-Names': 'true'})
resp = req.get_response(self.app)
self.assertEqual(resp.body.split(b'\n'), [
b'<?xml version="1.0" encoding="UTF-8"?>',
b'<account name="a\xe2\x98\x83">',
b'<container><name>bar</name><count>0</count><bytes>0</bytes>'
b'<last_modified>1970-01-01T00:00:00.000000</last_modified>'
b'</container>',
b'<container><name>%s</name>'
b'<count>0</count><bytes>0</bytes>'
b'<last_modified>1970-01-01T00:00:00.000000</last_modified>'
b'</container>' % get_reserved_name(
'bar', 'versions').encode('ascii'),
b'<subdir name="foo_" />',
b'<subdir name="%s" />' % get_reserved_name(
'foo_').encode('ascii'),
b'</account>',
])
self.assertEqual(resp.headers['Content-Type'],
'application/xml; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a\xe2\x98\x83?format=json'))
def test_valid_container(self):
self.fake_swift.register('GET', '/v1/a/c', HTTPOk, {
'Content-Length': str(len(self.fake_container_listing)),
'Content-Type': 'application/json'}, self.fake_container_listing)
req = Request.blank('/v1/a/c')
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'bar\nfoo/\n')
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a/c?format=json'))
req = Request.blank('/v1/a/c?format=txt')
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'bar\nfoo/\n')
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a/c?format=json'))
req = Request.blank('/v1/a/c?format=json')
resp = req.get_response(self.app)
self.assertEqual(json.loads(resp.body),
json.loads(self.fake_container_listing))
self.assertEqual(resp.headers['Content-Type'],
'application/json; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a/c?format=json'))
req = Request.blank('/v1/a/c?format=xml')
resp = req.get_response(self.app)
self.assertEqual(
resp.body,
b'<?xml version="1.0" encoding="UTF-8"?>\n'
b'<container name="c">'
b'<object><name>bar</name><hash>etag</hash><bytes>0</bytes>'
b'<content_type>text/plain</content_type>'
b'<last_modified>1970-01-01T00:00:00.000000</last_modified>'
b'</object>'
b'<subdir name="foo/"><name>foo/</name></subdir>'
b'</container>'
)
self.assertEqual(resp.headers['Content-Type'],
'application/xml; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a/c?format=json'))
def test_valid_container_with_reserved(self):
path = '/v1/a\xe2\x98\x83/c\xf0\x9f\x8c\xb4'
body_len = len(self.fake_container_listing_with_reserved)
self.fake_swift.register(
'GET', path, HTTPOk, {
'Content-Length': str(body_len),
'Content-Type': 'application/json',
}, self.fake_container_listing_with_reserved)
req = Request.blank(path)
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'bar\nfoo/\n')
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', path + '?format=json'))
self.assertEqual(self.logger.get_lines_for_level('warning'), [
"Container listing for a%E2%98%83/c%F0%9F%8C%B4 had reserved byte "
"in name: '\\x00bar\\x00extra data'",
"Container listing for a%E2%98%83/c%F0%9F%8C%B4 had reserved byte "
"in subdir: '\\x00foo/'",
])
req = Request.blank(path, headers={
'X-Backend-Allow-Reserved-Names': 'true'})
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'bar\n%s\nfoo/\n%s\n' % (
get_reserved_name('bar', 'extra data').encode('ascii'),
get_reserved_name('foo/').encode('ascii'),
))
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', path + '?format=json'))
req = Request.blank(path + '?format=txt')
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'bar\nfoo/\n')
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', path + '?format=json'))
req = Request.blank(path + '?format=txt', headers={
'X-Backend-Allow-Reserved-Names': 'true'})
resp = req.get_response(self.app)
self.assertEqual(resp.body, b'bar\n%s\nfoo/\n%s\n' % (
get_reserved_name('bar', 'extra data').encode('ascii'),
get_reserved_name('foo/').encode('ascii'),
))
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', path + '?format=json'))
req = Request.blank(path + '?format=json')
resp = req.get_response(self.app)
self.assertEqual(json.loads(resp.body),
json.loads(self.fake_container_listing))
self.assertEqual(resp.headers['Content-Type'],
'application/json; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', path + '?format=json'))
req = Request.blank(path + '?format=json', headers={
'X-Backend-Allow-Reserved-Names': 'true'})
resp = req.get_response(self.app)
self.assertEqual(json.loads(resp.body),
json.loads(self.fake_container_listing_with_reserved))
self.assertEqual(resp.headers['Content-Type'],
'application/json; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', path + '?format=json'))
req = Request.blank(path + '?format=xml')
resp = req.get_response(self.app)
self.assertEqual(
resp.body,
b'<?xml version="1.0" encoding="UTF-8"?>\n'
b'<container name="c\xf0\x9f\x8c\xb4">'
b'<object><name>bar</name><hash>etag</hash><bytes>0</bytes>'
b'<content_type>text/plain</content_type>'
b'<last_modified>1970-01-01T00:00:00.000000</last_modified>'
b'</object>'
b'<subdir name="foo/"><name>foo/</name></subdir>'
b'</container>'
)
self.assertEqual(resp.headers['Content-Type'],
'application/xml; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', path + '?format=json'))
req = Request.blank(path + '?format=xml', headers={
'X-Backend-Allow-Reserved-Names': 'true'})
resp = req.get_response(self.app)
self.assertEqual(
resp.body,
b'<?xml version="1.0" encoding="UTF-8"?>\n'
b'<container name="c\xf0\x9f\x8c\xb4">'
b'<object><name>bar</name><hash>etag</hash><bytes>0</bytes>'
b'<content_type>text/plain</content_type>'
b'<last_modified>1970-01-01T00:00:00.000000</last_modified>'
b'</object>'
b'<object><name>%s</name>'
b'<hash>etag</hash><bytes>0</bytes>'
b'<content_type>text/plain</content_type>'
b'<last_modified>1970-01-01T00:00:00.000000</last_modified>'
b'</object>'
b'<subdir name="foo/"><name>foo/</name></subdir>'
b'<subdir name="%s"><name>%s</name></subdir>'
b'</container>' % (
get_reserved_name('bar', 'extra data').encode('ascii'),
get_reserved_name('foo/').encode('ascii'),
get_reserved_name('foo/').encode('ascii'),
))
self.assertEqual(resp.headers['Content-Type'],
'application/xml; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', path + '?format=json'))
def test_blank_account(self):
self.fake_swift.register('GET', '/v1/a', HTTPOk, {
'Content-Length': '2', 'Content-Type': 'application/json'}, b'[]')
req = Request.blank('/v1/a')
resp = req.get_response(self.app)
self.assertEqual(resp.status, '204 No Content')
self.assertEqual(resp.body, b'')
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a?format=json'))
req = Request.blank('/v1/a?format=txt')
resp = req.get_response(self.app)
self.assertEqual(resp.status, '204 No Content')
self.assertEqual(resp.body, b'')
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a?format=json'))
req = Request.blank('/v1/a?format=json')
resp = req.get_response(self.app)
self.assertEqual(resp.status, '200 OK')
self.assertEqual(resp.body, b'[]')
self.assertEqual(resp.headers['Content-Type'],
'application/json; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a?format=json'))
req = Request.blank('/v1/a?format=xml')
resp = req.get_response(self.app)
self.assertEqual(resp.status, '200 OK')
self.assertEqual(resp.body.split(b'\n'), [
b'<?xml version="1.0" encoding="UTF-8"?>',
b'<account name="a">',
b'</account>',
])
self.assertEqual(resp.headers['Content-Type'],
'application/xml; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a?format=json'))
def test_blank_container(self):
self.fake_swift.register('GET', '/v1/a/c', HTTPOk, {
'Content-Length': '2', 'Content-Type': 'application/json'}, b'[]')
req = Request.blank('/v1/a/c')
resp = req.get_response(self.app)
self.assertEqual(resp.status, '204 No Content')
self.assertEqual(resp.body, b'')
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a/c?format=json'))
req = Request.blank('/v1/a/c?format=txt')
resp = req.get_response(self.app)
self.assertEqual(resp.status, '204 No Content')
self.assertEqual(resp.body, b'')
self.assertEqual(resp.headers['Content-Type'],
'text/plain; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a/c?format=json'))
req = Request.blank('/v1/a/c?format=json')
resp = req.get_response(self.app)
self.assertEqual(resp.status, '200 OK')
self.assertEqual(resp.body, b'[]')
self.assertEqual(resp.headers['Content-Type'],
'application/json; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a/c?format=json'))
req = Request.blank('/v1/a/c?format=xml')
resp = req.get_response(self.app)
self.assertEqual(resp.status, '200 OK')
self.assertEqual(resp.body.split(b'\n'), [
b'<?xml version="1.0" encoding="UTF-8"?>',
b'<container name="c" />',
])
self.assertEqual(resp.headers['Content-Type'],
'application/xml; charset=utf-8')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/a/c?format=json'))
def test_pass_through(self):
def do_test(path):
self.fake_swift.register(
'GET', path, HTTPOk, {
'Content-Length': str(len(self.fake_container_listing)),
'Content-Type': 'application/json'},
self.fake_container_listing)
req = Request.blank(path + '?format=xml')
resp = req.get_response(self.app)
self.assertEqual(resp.body, self.fake_container_listing)
self.assertEqual(resp.headers['Content-Type'], 'application/json')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', path + '?format=xml')) # query param is unchanged
do_test('/')
do_test('/v1')
do_test('/auth/v1.0')
do_test('/v1/a/c/o')
def test_static_web_not_json(self):
body = b'doesnt matter'
self.fake_swift.register(
'GET', '/v1/staticweb/not-json', HTTPOk,
{'Content-Length': str(len(body)),
'Content-Type': 'text/plain'},
body)
resp = Request.blank('/v1/staticweb/not-json').get_response(self.app)
self.assertEqual(resp.body, body)
self.assertEqual(resp.headers['Content-Type'], 'text/plain')
# We *did* try, though
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/staticweb/not-json?format=json'))
# TODO: add a similar test that has *no* content-type
# FakeSwift seems to make this hard to do
def test_static_web_not_really_json(self):
body = b'raises ValueError'
self.fake_swift.register(
'GET', '/v1/staticweb/not-json', HTTPOk,
{'Content-Length': str(len(body)),
'Content-Type': 'application/json'},
body)
resp = Request.blank('/v1/staticweb/not-json').get_response(self.app)
self.assertEqual(resp.body, body)
self.assertEqual(resp.headers['Content-Type'], 'application/json')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/staticweb/not-json?format=json'))
def test_static_web_pretend_to_be_giant_json(self):
body = json.dumps([
{'name': 'bar', 'hash': 'etag', 'bytes': 0,
'content_type': 'text/plain',
'last_modified': '1970-01-01T00:00:00.000000'},
{'subdir': 'foo/'},
] * 160000).encode('ascii')
self.assertGreater( # sanity
len(body), listing_formats.MAX_CONTAINER_LISTING_CONTENT_LENGTH)
self.fake_swift.register(
'GET', '/v1/staticweb/long-json', HTTPOk,
{'Content-Type': 'application/json'},
body)
resp = Request.blank('/v1/staticweb/long-json').get_response(self.app)
self.assertEqual(resp.headers['Content-Type'], 'application/json')
self.assertEqual(resp.body, body)
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/staticweb/long-json?format=json'))
# TODO: add a similar test for chunked transfers
# (staticweb referencing a DLO that doesn't fit in a single listing?)
def test_static_web_bad_json(self):
def do_test(body_obj):
body = json.dumps(body_obj).encode('ascii')
self.fake_swift.register(
'GET', '/v1/staticweb/bad-json', HTTPOk,
{'Content-Length': str(len(body)),
'Content-Type': 'application/json'},
body)
def do_sub_test(path):
resp = Request.blank(path).get_response(self.app)
self.assertEqual(resp.body, body)
# NB: no charset is added; we pass through whatever we got
self.assertEqual(resp.headers['Content-Type'],
'application/json')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/staticweb/bad-json?format=json'))
do_sub_test('/v1/staticweb/bad-json')
do_sub_test('/v1/staticweb/bad-json?format=txt')
do_sub_test('/v1/staticweb/bad-json?format=xml')
do_sub_test('/v1/staticweb/bad-json?format=json')
do_test({})
do_test({'non-empty': 'hash'})
do_test(None)
do_test(0)
do_test('some string')
do_test([None])
do_test([0])
do_test(['some string'])
def test_static_web_bad_but_not_terrible_json(self):
body = json.dumps([{'no name': 'nor subdir'}]).encode('ascii')
self.fake_swift.register(
'GET', '/v1/staticweb/bad-json', HTTPOk,
{'Content-Length': str(len(body)),
'Content-Type': 'application/json'},
body)
def do_test(path, expect_charset=False):
resp = Request.blank(path).get_response(self.app)
self.assertEqual(resp.body, body)
if expect_charset:
self.assertEqual(resp.headers['Content-Type'],
'application/json; charset=utf-8')
else:
self.assertEqual(resp.headers['Content-Type'],
'application/json')
self.assertEqual(self.fake_swift.calls[-1], (
'GET', '/v1/staticweb/bad-json?format=json'))
do_test('/v1/staticweb/bad-json')
do_test('/v1/staticweb/bad-json?format=txt')
do_test('/v1/staticweb/bad-json?format=xml')
# The response we get is *just close enough* to being valid that we
# assume it is and slap on the missing charset. If you set up staticweb
# to serve back such responses, your clients are already hosed.
do_test('/v1/staticweb/bad-json?format=json', expect_charset=True)
| swift-master | test/unit/common/middleware/test_listing_formats.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import unittest
from swift.common.swob import Request, HTTPMovedPermanently
from swift.common.middleware import domain_remap
from swift.common import registry
class FakeApp(object):
def __call__(self, env, start_response):
start_response('200 OK', [])
if six.PY2:
return [env['PATH_INFO']]
else:
return [env['PATH_INFO'].encode('latin-1')]
class RedirectSlashApp(object):
def __call__(self, env, start_response):
loc = env['PATH_INFO'] + '/'
return HTTPMovedPermanently(location=loc)(env, start_response)
def start_response(*args):
pass
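# With the default storage domain (example.com), requests to
# <container>.<account>.example.com are remapped to /v1/<account>/<container>/...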
class TestDomainRemap(unittest.TestCase):
def setUp(self):
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), {})
def test_domain_remap_passthrough(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'SERVER_NAME': 'example.com'},
headers={'Host': None})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/'])
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/'])
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'example.com:8080'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/'])
def test_domain_remap_account(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'SERVER_NAME': 'AUTH_a.example.com'},
headers={'Host': None})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/'])
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/'])
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'AUTH-uuid.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_uuid/'])
def test_domain_remap_account_container(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/'])
def test_domain_remap_extra_subdomains(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'x.y.c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'Bad domain in host header'])
def test_domain_remap_account_with_path_root_container(self):
req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/v1'])
def test_domain_remap_account_with_path_root_unicode_container(self):
req = Request.blank('/%E4%BD%A0%E5%A5%BD',
environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/\xe4\xbd\xa0\xe5\xa5\xbd'])
def test_domain_remap_account_container_with_path_root_obj(self):
req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/v1'])
def test_domain_remap_account_container_with_path_obj_slash_v1(self):
        # Include http://localhost because the urlparse call in
        # Request.__init__ parses //v1 as http://v1
req = Request.blank('http://localhost//v1',
environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c//v1'])
def test_domain_remap_account_container_with_root_path_obj_slash_v1(self):
req = Request.blank('/v1//v1',
environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/v1//v1'])
def test_domain_remap_account_container_with_path_trailing_slash(self):
req = Request.blank('/obj/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/obj/'])
def test_domain_remap_account_container_with_path(self):
req = Request.blank('/obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/obj'])
def test_domain_remap_account_container_with_path_root_and_path(self):
req = Request.blank('/v1/obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/v1/obj'])
def test_domain_remap_with_path_root_and_path_no_slash(self):
req = Request.blank('/v1obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/v1obj'])
def test_domain_remap_account_matching_ending_not_domain(self):
req = Request.blank('/dontchange', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.aexample.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/dontchange'])
def test_domain_remap_configured_with_empty_storage_domain(self):
self.app = domain_remap.DomainRemapMiddleware(FakeApp(),
{'storage_domain': ''})
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/test'])
def test_storage_domains_conf_format(self):
conf = {'storage_domain': 'foo.com'}
app = domain_remap.filter_factory(conf)(FakeApp())
self.assertEqual(app.storage_domain, ['.foo.com'])
conf = {'storage_domain': 'foo.com, '}
app = domain_remap.filter_factory(conf)(FakeApp())
self.assertEqual(app.storage_domain, ['.foo.com'])
conf = {'storage_domain': 'foo.com, bar.com'}
app = domain_remap.filter_factory(conf)(FakeApp())
self.assertEqual(app.storage_domain, ['.foo.com', '.bar.com'])
conf = {'storage_domain': 'foo.com, .bar.com'}
app = domain_remap.filter_factory(conf)(FakeApp())
self.assertEqual(app.storage_domain, ['.foo.com', '.bar.com'])
conf = {'storage_domain': '.foo.com, .bar.com'}
app = domain_remap.filter_factory(conf)(FakeApp())
self.assertEqual(app.storage_domain, ['.foo.com', '.bar.com'])
def test_domain_remap_configured_with_prefixes(self):
conf = {'reseller_prefixes': 'PREFIX'}
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.prefix_uuid.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/PREFIX_uuid/c/test'])
def test_domain_remap_configured_with_bad_prefixes(self):
conf = {'reseller_prefixes': 'UNKNOWN'}
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.prefix_uuid.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/test'])
def test_domain_remap_configured_with_no_prefixes(self):
conf = {'reseller_prefixes': ''}
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.uuid.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/uuid/c/test'])
def test_domain_remap_add_prefix(self):
conf = {'default_reseller_prefix': 'FOO'}
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'uuid.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/FOO_uuid/test'])
def test_domain_remap_add_prefix_already_there(self):
conf = {'default_reseller_prefix': 'AUTH'}
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'auth-uuid.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_uuid/test'])
def test_multiple_storage_domains(self):
conf = {'storage_domain': 'storage1.com, storage2.com'}
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
def do_test(host):
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': host})
return self.app(req.environ, start_response)
resp = do_test('auth-uuid.storage1.com')
self.assertEqual(resp, [b'/v1/AUTH_uuid/test'])
resp = do_test('auth-uuid.storage2.com')
self.assertEqual(resp, [b'/v1/AUTH_uuid/test'])
resp = do_test('auth-uuid.storage3.com')
self.assertEqual(resp, [b'/test'])
def test_domain_remap_redirect(self):
app = domain_remap.DomainRemapMiddleware(RedirectSlashApp(), {})
req = Request.blank('/cont', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'auth-uuid.example.com'})
resp = req.get_response(app)
self.assertEqual(resp.status_int, 301)
self.assertEqual(resp.headers.get('Location'),
'http://auth-uuid.example.com/cont/')
req = Request.blank('/cont/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'auth-uuid.example.com'})
resp = req.get_response(app)
self.assertEqual(resp.status_int, 301)
self.assertEqual(resp.headers.get('Location'),
'http://auth-uuid.example.com/cont/test/')
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'cont.auth-uuid.example.com'})
resp = req.get_response(app)
self.assertEqual(resp.status_int, 301)
self.assertEqual(resp.headers.get('Location'),
'http://cont.auth-uuid.example.com/test/')
class TestDomainRemapClientMangling(unittest.TestCase):
def setUp(self):
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), {
'mangle_client_paths': True})
def test_domain_remap_account_with_path_root_container(self):
req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/'])
def test_domain_remap_account_container_with_path_root_obj(self):
req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/'])
def test_domain_remap_account_container_with_path_obj_slash_v1(self):
        # Include http://localhost because urlparse, as used in
        # Request.__init__, parses //v1 as http://v1
req = Request.blank('http://localhost//v1',
environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c//v1'])
def test_domain_remap_account_container_with_root_path_obj_slash_v1(self):
req = Request.blank('/v1//v1',
environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c//v1'])
def test_domain_remap_account_container_with_path_trailing_slash(self):
req = Request.blank('/obj/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/obj/'])
def test_domain_remap_account_container_with_path_root_and_path(self):
req = Request.blank('/v1/obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/obj'])
def test_domain_remap_with_path_root_and_path_no_slash(self):
req = Request.blank('/v1obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'/v1/AUTH_a/c/v1obj'])
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
registry._swift_info = {}
registry._swift_admin_info = {}
def test_registered_defaults(self):
domain_remap.filter_factory({})
swift_info = registry.get_swift_info()
self.assertIn('domain_remap', swift_info)
self.assertEqual(swift_info['domain_remap'], {
'default_reseller_prefix': None})
def test_registered_nondefaults(self):
domain_remap.filter_factory({'default_reseller_prefix': 'cupcake',
'mangle_client_paths': 'yes'})
swift_info = registry.get_swift_info()
self.assertIn('domain_remap', swift_info)
self.assertEqual(swift_info['domain_remap'], {
'default_reseller_prefix': 'cupcake'})
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/middleware/test_domain_remap.py |
# Copyright (c) 2016-2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.middleware import copy, proxy_logging
from swift.common.swob import Request, HTTPOk
from swift.common.utils import close_if_possible
from swift.common.wsgi import make_subrequest
from test.debug_logger import debug_logger
from test.unit.common.middleware.helpers import FakeSwift
SUB_GET_PATH = '/v1/a/c/sub_get'
SUB_PUT_POST_PATH = '/v1/a/c/sub_put'
class FakeFilter(object):
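    """Test filter that makes a subrequest of the configured type before
    passing the original request through."""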
def __init__(self, app, conf, register):
self.body = ['FAKE MIDDLEWARE']
self.conf = conf
self.app = app
self.register = register
self.logger = None
def __call__(self, env, start_response):
path = SUB_PUT_POST_PATH
if env['REQUEST_METHOD'] == 'GET':
path = SUB_GET_PATH
# Make a subrequest that will be logged
hdrs = {'content-type': 'text/plain'}
sub_req = make_subrequest(env, path=path,
method=self.conf['subrequest_type'],
headers=hdrs,
agent='FakeApp',
swift_source='FA')
self.register(self.conf['subrequest_type'],
path, HTTPOk, headers=hdrs)
resp = sub_req.get_response(self.app)
close_if_possible(resp.app_iter)
return self.app(env, start_response)
class FakeApp(object):
def __init__(self, conf):
self.fake_logger = debug_logger()
self.fake_swift = self.app = FakeSwift()
self.register = self.fake_swift.register
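        # Build the pipeline proxy_logging -> copy -> FakeFilter ->
        # proxy_logging -> FakeSwift; the list below is written in request
        # order, so it is reversed to wrap the app from the inside out.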
for filter in reversed([
proxy_logging.filter_factory,
copy.filter_factory,
lambda conf: lambda app: FakeFilter(app, conf, self.register),
proxy_logging.filter_factory]):
self.app = filter(conf)(self.app)
self.app.logger = self.fake_logger
if hasattr(self.app, 'access_logger'):
self.app.access_logger = self.fake_logger
if conf['subrequest_type'] == 'GET':
self.register(conf['subrequest_type'], SUB_GET_PATH, HTTPOk, {})
else:
self.register(conf['subrequest_type'],
SUB_PUT_POST_PATH, HTTPOk, {})
@property
def __call__(self):
return self.app.__call__
class TestSubRequestLogging(unittest.TestCase):
path = '/v1/a/c/o'
def _test_subrequest_logged(self, subrequest_type):
# Test that subrequests made downstream from Copy PUT will be logged
# with the request type of the subrequest as opposed to the GET/PUT.
app = FakeApp({'subrequest_type': subrequest_type})
hdrs = {'content-type': 'text/plain', 'X-Copy-From': 'test/obj'}
req = Request.blank(self.path, method='PUT', headers=hdrs)
app.register('PUT', self.path, HTTPOk, headers=hdrs)
app.register('GET', '/v1/a/test/obj', HTTPOk, headers=hdrs)
req.get_response(app)
info_log_lines = app.fake_logger.get_lines_for_level('info')
self.assertEqual(len(info_log_lines), 4)
subreq_get = '%s %s' % (subrequest_type, SUB_GET_PATH)
subreq_put = '%s %s' % (subrequest_type, SUB_PUT_POST_PATH)
origput = 'PUT %s' % self.path
copyget = 'GET %s' % '/v1/a/test/obj'
# expect GET subreq, copy GET, PUT subreq, orig PUT
self.assertTrue(subreq_get in info_log_lines[0])
self.assertTrue(copyget in info_log_lines[1])
self.assertTrue(subreq_put in info_log_lines[2])
self.assertTrue(origput in info_log_lines[3])
def test_subrequest_logged_x_copy_from(self):
self._test_subrequest_logged('HEAD')
self._test_subrequest_logged('GET')
self._test_subrequest_logged('POST')
self._test_subrequest_logged('PUT')
self._test_subrequest_logged('DELETE')
def _test_subrequest_logged_POST(self, subrequest_type):
app = FakeApp({'subrequest_type': subrequest_type})
hdrs = {'content-type': 'text/plain'}
req = Request.blank(self.path, method='POST', headers=hdrs)
app.register('POST', self.path, HTTPOk, headers=hdrs)
expect_lines = 2
req.get_response(app)
info_log_lines = app.fake_logger.get_lines_for_level('info')
self.assertEqual(len(info_log_lines), expect_lines)
self.assertTrue('Copying object' not in info_log_lines[0])
subreq_put_post = '%s %s' % (subrequest_type, SUB_PUT_POST_PATH)
origpost = 'POST %s' % self.path
        # fast post: expect the POST subreq, then the original POST
self.assertTrue(subreq_put_post in info_log_lines[0])
self.assertTrue(origpost in info_log_lines[1])
def test_subrequest_logged_with_POST(self):
self._test_subrequest_logged_POST('HEAD')
self._test_subrequest_logged_POST('GET')
self._test_subrequest_logged_POST('POST')
self._test_subrequest_logged_POST('PUT')
self._test_subrequest_logged_POST('DELETE')
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/middleware/test_subrequest_logging.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
from swift.common.swob import Request, Response
from swift.common.middleware import healthcheck
class FakeApp(object):
def __call__(self, env, start_response):
req = Request(env)
return Response(request=req, body=b'FAKE APP')(
env, start_response)
class TestHealthCheck(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.disable_path = os.path.join(self.tempdir, 'dont-taze-me-bro')
self.got_statuses = []
def tearDown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
def get_app(self, app, global_conf, **local_conf):
factory = healthcheck.filter_factory(global_conf, **local_conf)
return factory(app)
def start_response(self, status, headers):
self.got_statuses.append(status)
def test_healthcheck(self):
req = Request.blank('/healthcheck', environ={'REQUEST_METHOD': 'GET'})
app = self.get_app(FakeApp(), {})
resp = app(req.environ, self.start_response)
self.assertEqual(['200 OK'], self.got_statuses)
self.assertEqual(resp, [b'OK'])
def test_healthcheck_pass(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
app = self.get_app(FakeApp(), {})
resp = app(req.environ, self.start_response)
self.assertEqual(['200 OK'], self.got_statuses)
self.assertEqual(resp, [b'FAKE APP'])
def test_healthcheck_pass_not_disabled(self):
req = Request.blank('/healthcheck', environ={'REQUEST_METHOD': 'GET'})
app = self.get_app(FakeApp(), {}, disable_path=self.disable_path)
resp = app(req.environ, self.start_response)
self.assertEqual(['200 OK'], self.got_statuses)
self.assertEqual(resp, [b'OK'])
def test_healthcheck_pass_disabled(self):
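        # Creating the disable_path file should make healthcheck return 503.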
open(self.disable_path, 'w')
req = Request.blank('/healthcheck', environ={'REQUEST_METHOD': 'GET'})
app = self.get_app(FakeApp(), {}, disable_path=self.disable_path)
resp = app(req.environ, self.start_response)
self.assertEqual(['503 Service Unavailable'], self.got_statuses)
self.assertEqual(resp, [b'DISABLED BY FILE'])
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/middleware/test_healthcheck.py |
#!/usr/bin/env python
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from six.moves import urllib
from swift.common import swob
from swift.common.middleware import copy
from swift.common.storage_policy import POLICIES
from swift.common.swob import Request, HTTPException
from swift.common.utils import closing_if_possible, md5
from test.debug_logger import debug_logger
from test.unit import patch_policies, FakeRing
from test.unit.common.middleware.helpers import FakeSwift
from test.unit.proxy.controllers.test_obj import set_http_connect, \
PatchedObjControllerApp
class TestCopyConstraints(unittest.TestCase):
def test_validate_copy_from(self):
req = Request.blank(
'/v/a/c/o',
headers={'x-copy-from': 'c/o2'})
src_cont, src_obj = copy._check_copy_from_header(req)
self.assertEqual(src_cont, 'c')
self.assertEqual(src_obj, 'o2')
req = Request.blank(
'/v/a/c/o',
headers={'x-copy-from': 'c/subdir/o2'})
src_cont, src_obj = copy._check_copy_from_header(req)
self.assertEqual(src_cont, 'c')
self.assertEqual(src_obj, 'subdir/o2')
req = Request.blank(
'/v/a/c/o',
headers={'x-copy-from': '/c/o2'})
src_cont, src_obj = copy._check_copy_from_header(req)
self.assertEqual(src_cont, 'c')
self.assertEqual(src_obj, 'o2')
def test_validate_bad_copy_from(self):
req = Request.blank(
'/v/a/c/o',
headers={'x-copy-from': 'bad_object'})
self.assertRaises(HTTPException,
copy._check_copy_from_header, req)
def test_validate_destination(self):
req = Request.blank(
'/v/a/c/o',
headers={'destination': 'c/o2'})
src_cont, src_obj = copy._check_destination_header(req)
self.assertEqual(src_cont, 'c')
self.assertEqual(src_obj, 'o2')
req = Request.blank(
'/v/a/c/o',
headers={'destination': 'c/subdir/o2'})
src_cont, src_obj = copy._check_destination_header(req)
self.assertEqual(src_cont, 'c')
self.assertEqual(src_obj, 'subdir/o2')
req = Request.blank(
'/v/a/c/o',
headers={'destination': '/c/o2'})
src_cont, src_obj = copy._check_destination_header(req)
self.assertEqual(src_cont, 'c')
self.assertEqual(src_obj, 'o2')
def test_validate_bad_destination(self):
req = Request.blank(
'/v/a/c/o',
headers={'destination': 'bad_object'})
self.assertRaises(HTTPException,
copy._check_destination_header, req)
class TestServerSideCopyMiddleware(unittest.TestCase):
def setUp(self):
self.app = FakeSwift()
self.ssc = copy.filter_factory({})(self.app)
self.ssc.logger = self.app.logger
def tearDown(self):
self.assertEqual(self.app.unclosed_requests, {})
def call_app(self, req, app=None, expect_exception=False):
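        # Drive the request through the WSGI app, recording swift.authorize
        # calls and collecting the status, headers and full response body.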
if app is None:
app = self.app
self.authorized = []
def authorize(req):
self.authorized.append(req)
if 'swift.authorize' not in req.environ:
req.environ['swift.authorize'] = authorize
req.headers.setdefault("User-Agent", "Bruce Wayne")
status = [None]
headers = [None]
def start_response(s, h, ei=None):
status[0] = s
headers[0] = h
body_iter = app(req.environ, start_response)
body = b''
caught_exc = None
try:
# appease the close-checker
with closing_if_possible(body_iter):
for chunk in body_iter:
body += chunk
except Exception as exc:
if expect_exception:
caught_exc = exc
else:
raise
if expect_exception:
return status[0], headers[0], body, caught_exc
else:
return status[0], headers[0], body
def call_ssc(self, req, **kwargs):
return self.call_app(req, app=self.ssc, **kwargs)
def assertRequestEqual(self, req, other):
self.assertEqual(req.method, other.method)
self.assertEqual(req.path, other.path)
def test_no_object_in_path_pass_through(self):
self.app.register('PUT', '/v1/a/c', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c', method='PUT')
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
def test_object_pass_through_methods(self):
for method in ['DELETE', 'GET', 'HEAD', 'REPLICATE']:
self.app.register(method, '/v1/a/c/o', swob.HTTPOk, {})
req = Request.blank('/v1/a/c/o', method=method)
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
self.assertNotIn('swift.orig_req_method', req.environ)
def test_basic_put_with_x_copy_from(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o2', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o2', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o2', self.authorized[1].path)
self.assertEqual(self.app.swift_sources[0], 'SSC')
self.assertEqual(self.app.swift_sources[1], 'SSC')
# For basic test cases, assert orig_req_method behavior
self.assertNotIn('swift.orig_req_method', req.environ)
def test_static_large_object_manifest(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
{'X-Static-Large-Object': 'True',
'Etag': 'should not be sent'}, 'passed')
self.app.register('PUT', '/v1/a/c/o2?multipart-manifest=put',
swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o2?multipart-manifest=get',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(2, len(self.app.calls))
self.assertEqual('GET', self.app.calls[0][0])
get_path, qs = self.app.calls[0][1].split('?')
params = urllib.parse.parse_qs(qs)
self.assertDictEqual(
{'format': ['raw'], 'multipart-manifest': ['get']}, params)
self.assertEqual(get_path, '/v1/a/c/o')
self.assertEqual(self.app.calls[1],
('PUT', '/v1/a/c/o2?multipart-manifest=put'))
req_headers = self.app.headers[1]
self.assertNotIn('X-Static-Large-Object', req_headers)
self.assertNotIn('Etag', req_headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o2', self.authorized[1].path)
def test_static_large_object(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
{'X-Static-Large-Object': 'True',
'Etag': 'should not be sent'}, 'passed')
self.app.register('PUT', '/v1/a/c/o2',
swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o2',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(self.app.calls, [
('GET', '/v1/a/c/o'),
('PUT', '/v1/a/c/o2')])
req_headers = self.app.headers[1]
self.assertNotIn('X-Static-Large-Object', req_headers)
self.assertNotIn('Etag', req_headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o2', self.authorized[1].path)
def test_basic_put_with_x_copy_from_across_container(self):
self.app.register('GET', '/v1/a/c1/o1', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c2/o2', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c1/o1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c1/o1') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c1/o1', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c2/o2', self.authorized[1].path)
def test_basic_put_with_x_copy_from_across_container_and_account(self):
self.app.register('GET', '/v1/a1/c1/o1', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a2/c2/o2', swob.HTTPCreated, {},
'passed')
req = Request.blank('/v1/a2/c2/o2', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c1/o1',
'X-Copy-From-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c1/o1') in headers)
self.assertTrue(('X-Copied-From-Account', 'a1') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a1/c1/o1', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a2/c2/o2', self.authorized[1].path)
def test_copy_non_zero_content_length(self):
req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '10',
'X-Copy-From': 'c1/o1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '400 Bad Request')
def test_copy_non_zero_content_length_with_account(self):
req = Request.blank('/v1/a2/c2/o2', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '10',
'X-Copy-From': 'c1/o1',
'X-Copy-From-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '400 Bad Request')
def test_copy_with_slashes_in_x_copy_from(self):
self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o/o2'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o/o2') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_with_slashes_in_x_copy_from_and_account(self):
self.app.register('GET', '/v1/a1/c1/o/o1', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a2/c2/o2', swob.HTTPCreated, {})
req = Request.blank('/v1/a2/c2/o2', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c1/o/o1',
'X-Copy-From-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c1/o/o1') in headers)
self.assertTrue(('X-Copied-From-Account', 'a1') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a1/c1/o/o1', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a2/c2/o2', self.authorized[1].path)
def test_copy_with_spaces_in_x_copy_from(self):
self.app.register('GET', '/v1/a/c/o o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
        # space in source path
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o%20o2'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('GET', method)
self.assertEqual('/v1/a/c/o o2', path)
self.assertTrue(('X-Copied-From', 'c/o%20o2') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o%20o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_with_unicode(self):
self.app.register('GET', '/v1/a/c/\xF0\x9F\x8C\xB4', swob.HTTPOk,
{}, 'passed')
self.app.register('PUT', '/v1/a/c/\xE2\x98\x83', swob.HTTPCreated, {})
# Just for fun, let's have a mix of properly encoded and not
req = Request.blank('/v1/a/c/%F0\x9F%8C%B4',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Content-Length': '0',
'Destination': 'c/%E2\x98%83'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('GET', method)
self.assertEqual('/v1/a/c/\xF0\x9F\x8C\xB4', path)
self.assertIn(('X-Copied-From', 'c/%F0%9F%8C%B4'), headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/%F0%9F%8C%B4', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/%E2%98%83', self.authorized[1].path)
def test_copy_with_spaces_in_x_copy_from_and_account(self):
self.app.register('GET', '/v1/a/c/o o2', swob.HTTPOk, {}, b'passed')
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
        # space in source path
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o%20o2',
'X-Copy-From-Account': 'a'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('GET', method)
self.assertEqual('/v1/a/c/o o2', path)
self.assertTrue(('X-Copied-From', 'c/o%20o2') in headers)
self.assertTrue(('X-Copied-From-Account', 'a') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o%20o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_copy_with_leading_slash_in_x_copy_from(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
# repeat tests with leading /
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('GET', method)
self.assertEqual('/v1/a/c/o', path)
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_with_leading_slash_in_x_copy_from_and_account(self):
# repeat tests with leading /
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('GET', method)
self.assertEqual('/v1/a/c/o', path)
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertTrue(('X-Copied-From-Account', 'a') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_copy_with_leading_slash_and_slashes_in_x_copy_from(self):
self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o/o2'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('GET', method)
self.assertEqual('/v1/a/c/o/o2', path)
self.assertTrue(('X-Copied-From', 'c/o/o2') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_with_leading_slash_and_slashes_in_x_copy_from_acct(self):
self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o/o2',
'X-Copy-From-Account': 'a'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('GET', method)
self.assertEqual('/v1/a/c/o/o2', path)
self.assertTrue(('X-Copied-From', 'c/o/o2') in headers)
self.assertTrue(('X-Copied-From-Account', 'a') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_copy_with_no_object_in_x_copy_from(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '412 Precondition Failed')
def test_copy_with_no_object_in_x_copy_from_and_account(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c',
'X-Copy-From-Account': 'a'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '412 Precondition Failed')
def test_copy_with_bad_x_copy_from_account(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': '/i/am/bad'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '412 Precondition Failed')
def test_copy_server_error_reading_source(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {})
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '503 Service Unavailable')
def test_copy_server_error_reading_source_and_account(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {})
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '503 Service Unavailable')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_copy_not_found_reading_source(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {})
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '404 Not Found')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_copy_not_found_reading_source_and_account(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {})
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '404 Not Found')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_copy_with_object_metadata(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Object-Meta-Ours': 'okay'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o', path)
self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay')
self.assertTrue(('X-Object-Meta-Ours', 'okay') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_with_object_metadata_and_account(self):
self.app.register('GET', '/v1/a1/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Object-Meta-Ours': 'okay',
'X-Copy-From-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o', path)
self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay')
self.assertTrue(('X-Object-Meta-Ours', 'okay') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a1/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_source_larger_than_max_file_size(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "largebody")
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
with mock.patch('swift.common.middleware.copy.'
'MAX_FILE_SIZE', 1):
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '413 Request Entity Too Large')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_basic_COPY(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {
'etag': 'is sent'}, 'passed')
self.app.register('PUT', '/v1/a/c/o-copy', swob.HTTPCreated, {})
req = Request.blank(
'/v1/a/c/o', method='COPY',
headers={'Content-Length': 0,
'Destination': 'c/o-copy'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o-copy', self.authorized[1].path)
self.assertEqual(self.app.calls, [
('GET', '/v1/a/c/o'),
('PUT', '/v1/a/c/o-copy')])
self.assertIn('etag', self.app.headers[1])
self.assertEqual(self.app.headers[1]['etag'], 'is sent')
# For basic test cases, assert orig_req_method behavior
self.assertEqual(req.environ['swift.orig_req_method'], 'COPY')
def test_basic_DLO(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {
'x-object-manifest': 'some/path',
'etag': 'is not sent'}, 'passed')
self.app.register('PUT', '/v1/a/c/o-copy', swob.HTTPCreated, {})
req = Request.blank(
'/v1/a/c/o', method='COPY',
headers={'Content-Length': 0,
'Destination': 'c/o-copy'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(self.app.calls, [
('GET', '/v1/a/c/o'),
('PUT', '/v1/a/c/o-copy')])
self.assertNotIn('x-object-manifest', self.app.headers[1])
self.assertNotIn('etag', self.app.headers[1])
def test_basic_DLO_manifest(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {
'x-object-manifest': 'some/path',
'etag': 'is sent'}, 'passed')
self.app.register('PUT', '/v1/a/c/o-copy', swob.HTTPCreated, {})
req = Request.blank(
'/v1/a/c/o?multipart-manifest=get', method='COPY',
headers={'Content-Length': 0,
'Destination': 'c/o-copy'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(2, len(self.app.calls))
self.assertEqual('GET', self.app.calls[0][0])
get_path, qs = self.app.calls[0][1].split('?')
params = urllib.parse.parse_qs(qs)
self.assertDictEqual(
{'format': ['raw'], 'multipart-manifest': ['get']}, params)
self.assertEqual(get_path, '/v1/a/c/o')
self.assertEqual(self.app.calls[1], ('PUT', '/v1/a/c/o-copy'))
self.assertIn('x-object-manifest', self.app.headers[1])
self.assertEqual(self.app.headers[1]['x-object-manifest'], 'some/path')
self.assertIn('etag', self.app.headers[1])
self.assertEqual(self.app.headers[1]['etag'], 'is sent')
def test_COPY_source_metadata(self):
source_headers = {
'x-object-sysmeta-test1': 'copy me',
'x-object-meta-test2': 'copy me too',
'x-object-transient-sysmeta-test3': 'ditto',
'x-object-sysmeta-container-update-override-etag': 'etag val',
'x-object-sysmeta-container-update-override-size': 'size val',
'x-object-sysmeta-container-update-override-foo': 'bar',
'x-delete-at': 'delete-at-time'}
get_resp_headers = source_headers.copy()
get_resp_headers['etag'] = 'source etag'
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPOk,
headers=get_resp_headers, body=b'passed')
def verify_headers(expected_headers, unexpected_headers,
actual_headers):
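            # Each expected header must appear in actual_headers with its
            # expected value, and no unexpected header may appear at all.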
for k, v in actual_headers:
if k.lower() in expected_headers:
expected_val = expected_headers.pop(k.lower())
self.assertEqual(expected_val, v)
self.assertNotIn(k.lower(), unexpected_headers)
self.assertFalse(expected_headers)
# use a COPY request
self.app.register('PUT', '/v1/a/c/o-copy0', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', method='COPY',
headers={'Content-Length': 0,
'Destination': 'c/o-copy0'})
status, resp_headers, body = self.call_ssc(req)
self.assertEqual('201 Created', status)
verify_headers(source_headers.copy(), [], resp_headers)
method, path, put_headers = self.app.calls_with_headers[-1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o-copy0', path)
verify_headers(source_headers.copy(), [], put_headers.items())
self.assertIn('etag', put_headers)
self.assertEqual(put_headers['etag'], 'source etag')
req = Request.blank('/v1/a/c/o-copy0', method='GET')
status, resp_headers, body = self.call_ssc(req)
self.assertEqual('200 OK', status)
verify_headers(source_headers.copy(), [], resp_headers)
# use a COPY request with a Range header
self.app.register('PUT', '/v1/a/c/o-copy1', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', method='COPY',
headers={'Content-Length': 0,
'Destination': 'c/o-copy1',
'Range': 'bytes=1-2'})
status, resp_headers, body = self.call_ssc(req)
expected_headers = source_headers.copy()
unexpected_headers = (
'x-object-sysmeta-container-update-override-etag',
'x-object-sysmeta-container-update-override-size',
'x-object-sysmeta-container-update-override-foo')
for h in unexpected_headers:
expected_headers.pop(h)
self.assertEqual('201 Created', status)
verify_headers(expected_headers, unexpected_headers, resp_headers)
method, path, put_headers = self.app.calls_with_headers[-1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o-copy1', path)
verify_headers(
expected_headers, unexpected_headers, put_headers.items())
# etag should not be copied with a Range request
self.assertNotIn('etag', put_headers)
req = Request.blank('/v1/a/c/o-copy1', method='GET')
status, resp_headers, body = self.call_ssc(req)
self.assertEqual('200 OK', status)
verify_headers(expected_headers, unexpected_headers, resp_headers)
# use a PUT with x-copy-from
self.app.register('PUT', '/v1/a/c/o-copy2', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o-copy2', method='PUT',
headers={'Content-Length': 0,
'X-Copy-From': 'c/o'})
status, resp_headers, body = self.call_ssc(req)
self.assertEqual('201 Created', status)
verify_headers(source_headers.copy(), [], resp_headers)
method, path, put_headers = self.app.calls_with_headers[-1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o-copy2', path)
verify_headers(source_headers.copy(), [], put_headers.items())
self.assertIn('etag', put_headers)
self.assertEqual(put_headers['etag'], 'source etag')
req = Request.blank('/v1/a/c/o-copy2', method='GET')
status, resp_headers, body = self.call_ssc(req)
self.assertEqual('200 OK', status)
verify_headers(source_headers.copy(), [], resp_headers)
# copy to same path as source
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'Content-Length': 0,
'X-Copy-From': 'c/o'})
status, resp_headers, body = self.call_ssc(req)
self.assertEqual('201 Created', status)
verify_headers(source_headers.copy(), [], resp_headers)
method, path, put_headers = self.app.calls_with_headers[-1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o', path)
verify_headers(source_headers.copy(), [], put_headers.items())
self.assertIn('etag', put_headers)
self.assertEqual(put_headers['etag'], 'source etag')
def test_COPY_no_destination_header(self):
req = Request.blank(
'/v1/a/c/o', method='COPY', headers={'Content-Length': 0})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '412 Precondition Failed')
self.assertEqual(len(self.authorized), 0)
def test_basic_COPY_account(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a1/c1/o2', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c1/o2',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('GET', method)
self.assertEqual('/v1/a/c/o', path)
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a1/c1/o2', path)
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertTrue(('X-Copied-From-Account', 'a') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o2', self.authorized[1].path)
def test_COPY_across_containers(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c2/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c2/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c2/o', self.authorized[1].path)
def test_COPY_source_with_slashes_in_name(self):
self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o', path)
self.assertTrue(('X-Copied-From', 'c/o/o2') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_COPY_account_source_with_slashes_in_name(self):
self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c1/o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a1/c1/o', path)
self.assertTrue(('X-Copied-From', 'c/o/o2') in headers)
self.assertTrue(('X-Copied-From-Account', 'a') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_COPY_destination_leading_slash(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_COPY_account_destination_leading_slash(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a1/c1/o', path)
self.assertTrue(('X-Copied-From', 'c/o') in headers)
self.assertTrue(('X-Copied-From-Account', 'a') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_COPY_source_with_slashes_destination_leading_slash(self):
self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o', path)
self.assertTrue(('X-Copied-From', 'c/o/o2') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_COPY_account_source_with_slashes_destination_leading_slash(self):
self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a1/c1/o', path)
self.assertTrue(('X-Copied-From', 'c/o/o2') in headers)
self.assertTrue(('X-Copied-From-Account', 'a') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_COPY_no_object_in_destination(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c_o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '412 Precondition Failed')
def test_COPY_account_no_object_in_destination(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c_o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '412 Precondition Failed')
def test_COPY_account_bad_destination_account(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o',
'Destination-Account': '/i/am/bad'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '412 Precondition Failed')
def test_COPY_server_error_reading_source(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '503 Service Unavailable')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_account_server_error_reading_source(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '503 Service Unavailable')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_not_found_reading_source(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '404 Not Found')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_account_not_found_reading_source(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '404 Not Found')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_with_metadata(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "passed")
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o',
'X-Object-Meta-Ours': 'okay'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c/o', path)
self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay')
self.assertTrue(('X-Object-Meta-Ours', 'okay') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_COPY_account_with_metadata(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "passed")
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'X-Object-Meta-Ours': 'okay',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a1/c1/o', path)
self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay')
self.assertTrue(('X-Object-Meta-Ours', 'okay') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_COPY_source_zero_content_length(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '413 Request Entity Too Large')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_source_larger_than_max_file_size(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "largebody")
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
with mock.patch('swift.common.middleware.copy.'
'MAX_FILE_SIZE', 1):
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '413 Request Entity Too Large')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_account_source_zero_content_length(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '413 Request Entity Too Large')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_account_source_larger_than_max_file_size(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "largebody")
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
with mock.patch('swift.common.middleware.copy.'
'MAX_FILE_SIZE', 1):
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '413 Request Entity Too Large')
self.assertEqual(len(self.authorized), 1)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def test_COPY_newest(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
{'Last-Modified': '123'}, "passed")
self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From-Last-Modified', '123') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_COPY_account_newest(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
{'Last-Modified': '123'}, "passed")
self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
self.assertTrue(('X-Copied-From-Last-Modified', '123') in headers)
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_COPY_in_OPTIONS_response(self):
self.app.register('OPTIONS', '/v1/a/c/o', swob.HTTPOk,
{'Allow': 'GET, PUT'})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'OPTIONS'}, headers={})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '200 OK')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('OPTIONS', method)
self.assertEqual('/v1/a/c/o', path)
self.assertTrue(('Allow', 'GET, PUT, COPY') in headers)
self.assertEqual(len(self.authorized), 1)
self.assertEqual('OPTIONS', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
        # OPTIONS is passed through unchanged, so the copy middleware does
        # not record an original request method
self.assertNotIn('swift.orig_req_method', req.environ)
def test_COPY_in_OPTIONS_response_CORS(self):
self.app.register('OPTIONS', '/v1/a/c/o', swob.HTTPOk,
{'Allow': 'GET, PUT',
'Access-Control-Allow-Methods': 'GET, PUT'})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'OPTIONS'}, headers={})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '200 OK')
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('OPTIONS', method)
self.assertEqual('/v1/a/c/o', path)
self.assertTrue(('Allow', 'GET, PUT, COPY') in headers)
self.assertTrue(('Access-Control-Allow-Methods',
'GET, PUT, COPY') in headers)
self.assertEqual(len(self.authorized), 1)
self.assertEqual('OPTIONS', self.authorized[0].method)
self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def _test_COPY_source_headers(self, extra_put_headers):
# helper method to perform a COPY with some metadata headers that
# should always be sent to the destination
put_headers = {'Destination': '/c1/o',
'X-Object-Meta-Test2': 'added',
'X-Object-Sysmeta-Test2': 'added',
'X-Object-Transient-Sysmeta-Test2': 'added'}
put_headers.update(extra_put_headers)
get_resp_headers = {
'X-Timestamp': '1234567890.12345',
'X-Backend-Timestamp': '1234567890.12345',
'Content-Type': 'text/original',
'Content-Encoding': 'gzip',
'Content-Disposition': 'attachment; filename=myfile',
'X-Object-Meta-Test': 'original',
'X-Object-Sysmeta-Test': 'original',
'X-Object-Transient-Sysmeta-Test': 'original',
'X-Foo': 'Bar'}
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPOk, headers=get_resp_headers)
self.app.register('PUT', '/v1/a/c1/o', swob.HTTPCreated, {})
req = Request.blank('/v1/a/c/o', method='COPY', headers=put_headers)
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
self.assertEqual(2, len(calls))
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
# these headers should always be applied to the destination
self.assertEqual('added', req_headers.get('X-Object-Meta-Test2'))
self.assertEqual('added', req_headers.get('X-Object-Sysmeta-Test2'))
self.assertEqual('added',
req_headers.get('X-Object-Transient-Sysmeta-Test2'))
return req_headers
def test_COPY_source_headers_no_updates(self):
# copy should preserve existing metadata if not updated
req_headers = self._test_COPY_source_headers({})
self.assertEqual('text/original', req_headers.get('Content-Type'))
self.assertEqual('gzip', req_headers.get('Content-Encoding'))
self.assertEqual('attachment; filename=myfile',
req_headers.get('Content-Disposition'))
self.assertEqual('original', req_headers.get('X-Object-Meta-Test'))
self.assertEqual('original', req_headers.get('X-Object-Sysmeta-Test'))
self.assertEqual('original',
req_headers.get('X-Object-Transient-Sysmeta-Test'))
self.assertEqual('Bar', req_headers.get('X-Foo'))
self.assertNotIn('X-Timestamp', req_headers)
self.assertNotIn('X-Backend-Timestamp', req_headers)
def test_COPY_source_headers_with_updates(self):
# copy should apply any updated values to existing metadata
put_headers = {
'Content-Type': 'text/not_original',
'Content-Encoding': 'not_gzip',
'Content-Disposition': 'attachment; filename=notmyfile',
'X-Object-Meta-Test': 'not_original',
'X-Object-Sysmeta-Test': 'not_original',
'X-Object-Transient-Sysmeta-Test': 'not_original',
'X-Foo': 'Not Bar'}
req_headers = self._test_COPY_source_headers(put_headers)
self.assertEqual('text/not_original', req_headers.get('Content-Type'))
self.assertEqual('not_gzip', req_headers.get('Content-Encoding'))
self.assertEqual('attachment; filename=notmyfile',
req_headers.get('Content-Disposition'))
self.assertEqual('not_original', req_headers.get('X-Object-Meta-Test'))
self.assertEqual('not_original',
req_headers.get('X-Object-Sysmeta-Test'))
self.assertEqual('not_original',
req_headers.get('X-Object-Transient-Sysmeta-Test'))
self.assertEqual('Not Bar', req_headers.get('X-Foo'))
self.assertNotIn('X-Timestamp', req_headers)
self.assertNotIn('X-Backend-Timestamp', req_headers)
def test_COPY_x_fresh_metadata_no_updates(self):
# existing user metadata should not be copied, sysmeta is copied
put_headers = {
'X-Fresh-Metadata': 'true',
'X-Extra': 'Fresh'}
req_headers = self._test_COPY_source_headers(put_headers)
self.assertEqual('text/original', req_headers.get('Content-Type'))
self.assertEqual('Fresh', req_headers.get('X-Extra'))
self.assertEqual('original',
req_headers.get('X-Object-Sysmeta-Test'))
self.assertIn('X-Fresh-Metadata', req_headers)
self.assertNotIn('X-Object-Meta-Test', req_headers)
self.assertNotIn('X-Object-Transient-Sysmeta-Test', req_headers)
self.assertNotIn('X-Timestamp', req_headers)
self.assertNotIn('X-Backend-Timestamp', req_headers)
self.assertNotIn('Content-Encoding', req_headers)
self.assertNotIn('Content-Disposition', req_headers)
self.assertNotIn('X-Foo', req_headers)
def test_COPY_x_fresh_metadata_with_updates(self):
# existing user metadata should not be copied, new metadata replaces it
put_headers = {
'X-Fresh-Metadata': 'true',
'Content-Type': 'text/not_original',
'Content-Encoding': 'not_gzip',
'Content-Disposition': 'attachment; filename=notmyfile',
'X-Object-Meta-Test': 'not_original',
'X-Object-Sysmeta-Test': 'not_original',
'X-Object-Transient-Sysmeta-Test': 'not_original',
'X-Foo': 'Not Bar',
'X-Extra': 'Fresh'}
req_headers = self._test_COPY_source_headers(put_headers)
self.assertEqual('Fresh', req_headers.get('X-Extra'))
self.assertEqual('text/not_original', req_headers.get('Content-Type'))
self.assertEqual('not_gzip', req_headers.get('Content-Encoding'))
self.assertEqual('attachment; filename=notmyfile',
req_headers.get('Content-Disposition'))
self.assertEqual('not_original', req_headers.get('X-Object-Meta-Test'))
self.assertEqual('not_original',
req_headers.get('X-Object-Sysmeta-Test'))
self.assertEqual('not_original',
req_headers.get('X-Object-Transient-Sysmeta-Test'))
self.assertEqual('Not Bar', req_headers.get('X-Foo'))
def test_COPY_with_single_range(self):
# verify that source etag is not copied when copying a range
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
{'etag': 'bogus etag'}, "abcdefghijklmnop")
self.app.register('PUT', '/v1/a/c1/o', swob.HTTPCreated, {})
req = swob.Request.blank(
'/v1/a/c/o', method='COPY',
headers={'Destination': 'c1/o',
'Range': 'bytes=5-10'})
status, headers, body = self.call_ssc(req)
self.assertEqual(status, '201 Created')
calls = self.app.calls_with_headers
self.assertEqual(2, len(calls))
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c1/o', path)
self.assertNotIn('etag', (h.lower() for h in req_headers))
self.assertEqual('6', req_headers['content-length'])
req = swob.Request.blank('/v1/a/c1/o', method='GET')
status, headers, body = self.call_ssc(req)
self.assertEqual(b'fghijk', body)
@patch_policies(with_ec_default=True)
class TestServerSideCopyMiddlewareWithEC(unittest.TestCase):
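    # drive COPY through the real proxy object controller with an EC
    # default policy; backend responses are faked via set_http_connect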
container_info = {
'status': 200,
'write_acl': None,
'read_acl': None,
'storage_policy': None,
'sync_key': None,
'versions': None,
}
def setUp(self):
self.logger = debug_logger('proxy-server')
self.logger.thread_locals = ('txn1', '127.0.0.2')
self.app = PatchedObjControllerApp(
None, account_ring=FakeRing(),
container_ring=FakeRing(), logger=self.logger)
self.ssc = copy.filter_factory({})(self.app)
self.ssc.logger = self.app.logger
self.policy = POLICIES.default
self.app.container_info = dict(self.container_info)
def test_COPY_with_single_range(self):
req = swob.Request.blank(
'/v1/a/c/o', method='COPY',
headers={'Destination': 'c1/o',
'Range': 'bytes=5-10'})
# turn a real body into fragments
segment_size = self.policy.ec_segment_size
real_body = (b'asdf' * segment_size)[:-10]
# split it up into chunks
chunks = [real_body[x:x + segment_size]
for x in range(0, len(real_body), segment_size)]
        # we only need the first chunk to rebuild the 5-10 byte range
fragments = self.policy.pyeclib_driver.encode(chunks[0])
fragment_payloads = []
fragment_payloads.append(fragments)
node_fragments = list(zip(*fragment_payloads))
self.assertEqual(len(node_fragments),
self.policy.object_ring.replicas) # sanity
headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))}
responses = [(200, b''.join(node_fragments[i]), headers)
for i in range(POLICIES.default.ec_ndata)]
responses += [(201, b'', {})] * self.policy.object_ring.replicas
status_codes, body_iter, headers = zip(*responses)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
put_hdrs = []
def capture_conn(host, port, dev, part, method, path, *args, **kwargs):
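            # record the header set sent with each backend PUT so the etag
            # check below can inspect it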
if method == 'PUT':
put_hdrs.append(args[0])
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers, expect_headers=expect_headers,
give_connect=capture_conn):
resp = req.get_response(self.ssc)
self.assertEqual(resp.status_int, 201)
expected_puts = POLICIES.default.ec_ndata + POLICIES.default.ec_nparity
self.assertEqual(expected_puts, len(put_hdrs))
for hdrs in put_hdrs:
# etag should not be copied from source
self.assertNotIn('etag', (h.lower() for h in hdrs))
def test_COPY_with_invalid_ranges(self):
# real body size is segment_size - 10 (just 1 segment)
segment_size = self.policy.ec_segment_size
real_body = (b'a' * segment_size)[:-10]
        # range is beyond the real body but within the segment size
self._test_invalid_ranges('COPY', real_body,
segment_size, '%s-' % (segment_size - 10))
        # range is beyond both the real body and the segment size
self._test_invalid_ranges('COPY', real_body,
segment_size, '%s-' % (segment_size + 10))
def _test_invalid_ranges(self, method, real_body, segment_size, req_range):
        # make a request whose range starts beyond the real body size
body_etag = md5(real_body, usedforsecurity=False).hexdigest()
req = swob.Request.blank(
'/v1/a/c/o', method=method,
headers={'Destination': 'c1/o',
'Range': 'bytes=%s' % (req_range)})
fragments = self.policy.pyeclib_driver.encode(real_body)
fragment_payloads = [fragments]
node_fragments = list(zip(*fragment_payloads))
self.assertEqual(len(node_fragments),
self.policy.object_ring.replicas) # sanity
headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body)),
'X-Object-Sysmeta-Ec-Etag': body_etag}
start = int(req_range.split('-')[0])
self.assertTrue(start >= 0) # sanity
title, exp = swob.RESPONSE_REASONS[416]
range_not_satisfiable_body = \
'<html><h1>%s</h1><p>%s</p></html>' % (title, exp)
range_not_satisfiable_body = range_not_satisfiable_body.encode('ascii')
if start >= segment_size:
responses = [(416, range_not_satisfiable_body, headers)
for i in range(POLICIES.default.ec_ndata)]
else:
responses = [(200, b''.join(node_fragments[i]), headers)
for i in range(POLICIES.default.ec_ndata)]
status_codes, body_iter, headers = zip(*responses)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
# TODO possibly use FakeApp here
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers, expect_headers=expect_headers):
resp = req.get_response(self.ssc)
self.assertEqual(resp.status_int, 416)
self.assertEqual(resp.content_length, len(range_not_satisfiable_body))
self.assertEqual(resp.body, range_not_satisfiable_body)
self.assertEqual(resp.etag, body_etag)
self.assertEqual(resp.headers['Accept-Ranges'], 'bytes')
| swift-master | test/unit/common/middleware/test_copy.py |
 | swift-master | test/unit/common/middleware/__init__.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
try:
# this test requires the dnspython package to be installed
import dns.resolver # noqa
import dns.exception
except ImportError:
skip = True
else: # executed if the try has no errors
skip = False
from swift.common import registry
from swift.common.middleware import cname_lookup
from swift.common.swob import Request, HTTPMovedPermanently
class FakeApp(object):
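    # Trivial WSGI app standing in for the rest of the pipeline: every
    # request gets a 200 response with the body "FAKE APP".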
def __call__(self, env, start_response):
start_response('200 OK', [])
return [b"FAKE APP"]
class RedirectSlashApp(object):
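    # WSGI app that answers every request with a 301 redirect to the same
    # path plus a trailing slash.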
def __call__(self, env, start_response):
loc = env['PATH_INFO'] + '/'
return HTTPMovedPermanently(location=loc)(env, start_response)
def start_response(*args):
pass
class TestCNAMELookup(unittest.TestCase):
@unittest.skipIf(skip, "can't import dnspython")
def setUp(self):
self.app = cname_lookup.CNAMELookupMiddleware(FakeApp(),
{'lookup_depth': 2})
def test_pass_ip_addresses(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': '10.134.23.198'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'FAKE APP'])
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'fc00:7ea1:f155::6321:8841'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'FAKE APP'])
@mock.patch('swift.common.middleware.cname_lookup.lookup_cname',
new=lambda d, r: (0, d))
def test_passthrough(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'foo.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'FAKE APP'])
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'foo.example.com:8080'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'FAKE APP'])
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'SERVER_NAME': 'foo.example.com'},
headers={'Host': None})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'FAKE APP'])
@mock.patch('swift.common.middleware.cname_lookup.lookup_cname',
new=lambda d, r: (0, '%s.example.com' % d))
def test_good_lookup(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'mysite.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'FAKE APP'])
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'mysite.com:8080'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'FAKE APP'])
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'SERVER_NAME': 'mysite.com'},
headers={'Host': None})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'FAKE APP'])
def test_lookup_chain_too_long(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'mysite.com'})
def my_lookup(d, r):
if d == 'mysite.com':
site = 'level1.foo.com'
elif d == 'level1.foo.com':
site = 'level2.foo.com'
elif d == 'level2.foo.com':
site = 'bar.example.com'
return 0, site
with mock.patch('swift.common.middleware.cname_lookup.lookup_cname',
new=my_lookup):
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'CNAME lookup failed after 2 tries'])
@mock.patch('swift.common.middleware.cname_lookup.lookup_cname',
new=lambda d, r: (0, 'some.invalid.site.com'))
def test_lookup_chain_bad_target(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'mysite.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp,
[b'CNAME lookup failed to resolve to a valid domain'])
@mock.patch('swift.common.middleware.cname_lookup.lookup_cname',
new=lambda d, r: (0, None))
def test_something_weird(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'mysite.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp,
[b'CNAME lookup failed to resolve to a valid domain'])
@mock.patch('swift.common.middleware.cname_lookup.lookup_cname',
new=lambda d, r: (0, '%s.example.com' % d))
def test_with_memcache(self):
class memcache_stub(object):
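            # dict-backed stand-in for the memcache client handed to the
            # middleware via swift.cache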
def __init__(self):
self.cache = {}
def get(self, key):
return self.cache.get(key, None)
def set(self, key, value, *a, **kw):
self.cache[key] = value
memcache = memcache_stub()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'swift.cache': memcache},
headers={'Host': 'mysite.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'FAKE APP'])
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'swift.cache': memcache},
headers={'Host': 'mysite.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'FAKE APP'])
def test_caching(self):
fail_to_resolve = [b'CNAME lookup failed to resolve to a valid domain']
class memcache_stub(object):
def __init__(self):
self.cache = {}
def get(self, key):
return self.cache.get(key, None)
def set(self, key, value, *a, **kw):
# real memcache client will JSON-serialize, so our mock
# should be sure to return unicode
if isinstance(value, bytes):
value = value.decode('utf-8')
self.cache[key] = value
module = 'swift.common.middleware.cname_lookup.lookup_cname'
dns_module = 'dns.resolver.Resolver.query'
memcache = memcache_stub()
with mock.patch(module) as m:
m.return_value = (3600, 'c.example.com')
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'swift.cache': memcache},
headers={'Host': 'mysite2.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'FAKE APP'])
self.assertEqual(m.call_count, 1)
self.assertEqual(memcache.cache.get('cname-mysite2.com'),
'c.example.com')
self.assertIsInstance(req.environ['HTTP_HOST'], str)
self.assertEqual(req.environ['HTTP_HOST'], 'c.example.com')
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'swift.cache': memcache},
headers={'Host': 'mysite2.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'FAKE APP'])
self.assertEqual(m.call_count, 1)
self.assertEqual(memcache.cache.get('cname-mysite2.com'),
'c.example.com')
self.assertIsInstance(req.environ['HTTP_HOST'], str)
self.assertEqual(req.environ['HTTP_HOST'], 'c.example.com')
for exc, num in ((dns.resolver.NXDOMAIN(), 3),
(dns.resolver.NoAnswer(), 4)):
with mock.patch(dns_module) as m:
m.side_effect = exc
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'swift.cache': memcache},
headers={'Host': 'mysite%d.com' % num})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, fail_to_resolve)
self.assertEqual(m.call_count, 1)
self.assertEqual(memcache.cache.get('cname-mysite3.com'),
False)
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'swift.cache': memcache},
headers={'Host': 'mysite%d.com' % num})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, fail_to_resolve)
self.assertEqual(m.call_count, 1)
self.assertEqual(
memcache.cache.get('cname-mysite%d.com' % num), False)
with mock.patch(dns_module) as m:
m.side_effect = dns.exception.DNSException()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'swift.cache': memcache},
headers={'Host': 'mysite5.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, fail_to_resolve)
self.assertEqual(m.call_count, 1)
self.assertFalse('cname-mysite5.com' in memcache.cache)
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'swift.cache': memcache},
headers={'Host': 'mysite5.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, fail_to_resolve)
self.assertEqual(m.call_count, 2)
self.assertFalse('cname-mysite5.com' in memcache.cache)
@mock.patch('swift.common.middleware.cname_lookup.lookup_cname',
new=lambda d, r: (0, 'c.aexample.com'))
def test_cname_matching_ending_not_domain(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'foo.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp,
[b'CNAME lookup failed to resolve to a valid domain'])
@mock.patch('swift.common.middleware.cname_lookup.lookup_cname',
new=lambda d, r: (0, None))
def test_cname_configured_with_empty_storage_domain(self):
app = cname_lookup.CNAMELookupMiddleware(FakeApp(),
{'storage_domain': '',
'lookup_depth': 2})
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.a.example.com'})
resp = app(req.environ, start_response)
self.assertEqual(resp, [b'FAKE APP'])
def test_storage_domains_conf_format(self):
conf = {'storage_domain': 'foo.com'}
app = cname_lookup.filter_factory(conf)(FakeApp())
self.assertEqual(app.storage_domain, ['.foo.com'])
conf = {'storage_domain': 'foo.com, '}
app = cname_lookup.filter_factory(conf)(FakeApp())
self.assertEqual(app.storage_domain, ['.foo.com'])
conf = {'storage_domain': 'foo.com, bar.com'}
app = cname_lookup.filter_factory(conf)(FakeApp())
self.assertEqual(app.storage_domain, ['.foo.com', '.bar.com'])
conf = {'storage_domain': 'foo.com, .bar.com'}
app = cname_lookup.filter_factory(conf)(FakeApp())
self.assertEqual(app.storage_domain, ['.foo.com', '.bar.com'])
conf = {'storage_domain': '.foo.com, .bar.com'}
app = cname_lookup.filter_factory(conf)(FakeApp())
self.assertEqual(app.storage_domain, ['.foo.com', '.bar.com'])
def test_multiple_storage_domains(self):
conf = {'storage_domain': 'storage1.com, storage2.com',
'lookup_depth': 2}
app = cname_lookup.CNAMELookupMiddleware(FakeApp(), conf)
def do_test(lookup_back):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.a.example.com'})
module = 'swift.common.middleware.cname_lookup.lookup_cname'
with mock.patch(module, lambda d, r: (0, lookup_back)):
return app(req.environ, start_response)
resp = do_test('c.storage1.com')
self.assertEqual(resp, [b'FAKE APP'])
resp = do_test('c.storage2.com')
self.assertEqual(resp, [b'FAKE APP'])
bad_domain = [b'CNAME lookup failed to resolve to a valid domain']
resp = do_test('c.badtest.com')
self.assertEqual(resp, bad_domain)
@mock.patch('dns.resolver.Resolver.query',
side_effect=dns.exception.DNSException)
def test_host_is_storage_domain(self, mock_lookup):
conf = {'storage_domain': 'storage.example.com',
'lookup_depth': 2}
app = cname_lookup.CNAMELookupMiddleware(FakeApp(), conf)
def do_test(host):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': host})
return app(req.environ, start_response)
bad_domain = [b'CNAME lookup failed to resolve to a valid domain']
resp = do_test('c.badtest.com')
self.assertEqual(resp, bad_domain)
self.assertEqual(1, len(mock_lookup.mock_calls))
mock_lookup.reset_mock()
resp = do_test('storage.example.com')
self.assertEqual(resp, [b'FAKE APP'])
self.assertEqual(0, len(mock_lookup.mock_calls))
def test_resolution_to_storage_domain_exactly(self):
conf = {'storage_domain': 'example.com',
'lookup_depth': 1}
app = cname_lookup.CNAMELookupMiddleware(FakeApp(), conf)
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'mysite.com'})
module = 'swift.common.middleware.cname_lookup.lookup_cname'
with mock.patch(module, lambda d, r: (0, 'example.com')):
resp = app(req.environ, start_response)
self.assertEqual(resp, [b'FAKE APP'])
def test_redirect(self):
app = cname_lookup.CNAMELookupMiddleware(RedirectSlashApp(), {})
module = 'swift.common.middleware.cname_lookup.lookup_cname'
with mock.patch(module, lambda d, r: (0, 'cont.acct.example.com')):
req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'mysite.com'})
resp = req.get_response(app)
self.assertEqual(resp.status_int, 301)
self.assertEqual(resp.headers.get('Location'),
'http://mysite.com/test/')
def test_configured_nameservers(self):
class MockedResolver(object):
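            # stand-in for dns.resolver.Resolver: records whatever
            # nameservers/ports the middleware configures and raises on
            # query() so no real DNS lookup ever happens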
def __init__(self):
self.nameservers = None
self.nameserver_ports = None
def query(self, *args, **kwargs):
raise Exception('Stop processing')
def reset(self):
self.nameservers = None
self.nameserver_ports = None
mocked_resolver = MockedResolver()
dns_module = 'dns.resolver.Resolver'
        # If no nameservers are provided in conf, resolver nameservers stay unset
for conf in [{}, {'nameservers': ''}]:
mocked_resolver.reset()
with mock.patch(dns_module, return_value=mocked_resolver):
app = cname_lookup.CNAMELookupMiddleware(FakeApp(), conf)
self.assertIs(app.resolver, mocked_resolver)
self.assertIsNone(mocked_resolver.nameservers)
        # If invalid nameservers are provided, construction raises ValueError
mocked_resolver.reset()
conf = {'nameservers': '127.0.0.1, 127.0.0.2, a.b.c.d'}
with mock.patch(dns_module, return_value=mocked_resolver):
with self.assertRaises(ValueError) as exc_mgr:
app = cname_lookup.CNAMELookupMiddleware(FakeApp(), conf)
self.assertIn('Invalid cname_lookup/nameservers configuration',
str(exc_mgr.exception))
        # If nameservers are provided in conf, resolver nameservers are set
mocked_resolver.reset()
conf = {'nameservers': '127.0.0.1'}
with mock.patch(dns_module, return_value=mocked_resolver):
app = cname_lookup.CNAMELookupMiddleware(FakeApp(), conf)
self.assertIs(app.resolver, mocked_resolver)
self.assertEqual(mocked_resolver.nameservers, ['127.0.0.1'])
self.assertEqual(mocked_resolver.nameserver_ports, {})
# IPv6 is OK
mocked_resolver.reset()
conf = {'nameservers': '[::1]'}
with mock.patch(dns_module, return_value=mocked_resolver):
app = cname_lookup.CNAMELookupMiddleware(FakeApp(), conf)
self.assertIs(app.resolver, mocked_resolver)
self.assertEqual(mocked_resolver.nameservers, ['::1'])
self.assertEqual(mocked_resolver.nameserver_ports, {})
# As are port overrides
mocked_resolver.reset()
conf = {'nameservers': '127.0.0.1:5354'}
with mock.patch(dns_module, return_value=mocked_resolver):
app = cname_lookup.CNAMELookupMiddleware(FakeApp(), conf)
self.assertIs(app.resolver, mocked_resolver)
self.assertEqual(mocked_resolver.nameservers, ['127.0.0.1'])
self.assertEqual(mocked_resolver.nameserver_ports, {'127.0.0.1': 5354})
# And IPv6 with port overrides
mocked_resolver.reset()
conf = {'nameservers': '[2001:db8::ff00:42:8329]:1234'}
with mock.patch(dns_module, return_value=mocked_resolver):
app = cname_lookup.CNAMELookupMiddleware(FakeApp(), conf)
self.assertIs(app.resolver, mocked_resolver)
self.assertEqual(mocked_resolver.nameservers, [
'2001:db8::ff00:42:8329'])
self.assertEqual(mocked_resolver.nameserver_ports, {
'2001:db8::ff00:42:8329': 1234})
# Also accept lists, and bring it all together
mocked_resolver.reset()
conf = {'nameservers': '[::1], 127.0.0.1:5354, '
'[2001:db8::ff00:42:8329]:1234'}
with mock.patch(dns_module, return_value=mocked_resolver):
app = cname_lookup.CNAMELookupMiddleware(FakeApp(), conf)
self.assertIs(app.resolver, mocked_resolver)
self.assertEqual(mocked_resolver.nameservers, [
'::1', '127.0.0.1', '2001:db8::ff00:42:8329'])
self.assertEqual(mocked_resolver.nameserver_ports, {
'127.0.0.1': 5354, '2001:db8::ff00:42:8329': 1234})
class TestSwiftInfo(unittest.TestCase):
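    # verify that filter_factory advertises cname_lookup capabilities via
    # the swift /info registry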
def setUp(self):
registry._swift_info = {}
registry._swift_admin_info = {}
def test_registered_defaults(self):
cname_lookup.filter_factory({})
swift_info = registry.get_swift_info()
self.assertIn('cname_lookup', swift_info)
self.assertEqual(swift_info['cname_lookup'].get('lookup_depth'), 1)
def test_registered_nondefaults(self):
cname_lookup.filter_factory({'lookup_depth': '2'})
swift_info = registry.get_swift_info()
self.assertIn('cname_lookup', swift_info)
self.assertEqual(swift_info['cname_lookup'].get('lookup_depth'), 2)
| swift-master | test/unit/common/middleware/test_cname_lookup.py |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.swob import Request, wsgify, HTTPForbidden, HTTPOk, \
HTTPServiceUnavailable, HTTPNotFound
from swift.common.middleware import account_quotas, copy
from test.unit import patch_policies
from test.unit.common.middleware.helpers import FakeSwift
class FakeCache(object):
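    # minimal swift.cache stand-in: get() always returns the value given to
    # the constructor and set() is a no-op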
def __init__(self, val):
self.val = val
def get(self, *args):
return self.val
def set(self, *args, **kwargs):
pass
class FakeAuthFilter(object):
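    # wraps the app with a swift.authorize callback that only accepts
    # requests presenting the token 'secret'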
def __init__(self, app):
self.app = app
@wsgify
def __call__(self, req):
def authorize(req):
if req.headers['x-auth-token'] == 'secret':
return
return HTTPForbidden(request=req)
req.environ['swift.authorize'] = authorize
return req.get_response(self.app)
class TestAccountQuota(unittest.TestCase):
def setUp(self):
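        # default backend registrations: an account HEAD reporting 1000
        # bytes used and no quota, plus the container and object paths that
        # most tests hit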
self.app = FakeSwift()
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '1000'})
self.app.register('HEAD', '/v1/a/c', HTTPOk, {
'x-backend-storage-policy-index': '1'})
self.app.register('POST', '/v1/a', HTTPOk, {})
self.app.register('PUT', '/v1/a/c/o', HTTPOk, {})
def test_unauthorized(self):
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache})
res = req.get_response(app)
# Response code of 200 because authentication itself is not done here
self.assertEqual(res.status_int, 200)
def test_no_quotas(self):
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_obj_request_ignores_attempt_to_set_quotas(self):
# If you try to set X-Account-Meta-* on an object, it's ignored, so
# the quota middleware shouldn't complain about it even if we're not a
# reseller admin.
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
headers={'X-Account-Meta-Quota-Bytes': '99999'},
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_container_request_ignores_attempt_to_set_quotas(self):
# As with an object, if you try to set X-Account-Meta-* on a
# container, it's ignored.
self.app.register('PUT', '/v1/a/c', HTTPOk, {})
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c',
headers={'X-Account-Meta-Quota-Bytes': '99999'},
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_bogus_quota_is_ignored(self):
# This can happen if the metadata was set by a user prior to the
# activation of the account-quota middleware
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '1000',
'x-account-meta-quota-bytes': 'pasty-plastogene'})
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_exceed_bytes_quota(self):
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '1000',
'x-account-meta-quota-bytes': '0'})
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, b'Upload exceeds quota.')
@patch_policies
def test_exceed_per_policy_quota(self):
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '100',
'x-account-storage-policy-unu-bytes-used': '100',
'x-account-sysmeta-quota-bytes-policy-1': '10',
'x-account-meta-quota-bytes': '1000'})
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, b'Upload exceeds policy quota.')
@patch_policies
def test_policy_quota_translation(self):
def do_test(method):
self.app.register(method, '/v1/a', HTTPOk, {
'x-account-bytes-used': '100',
'x-account-storage-policy-unu-bytes-used': '100',
'x-account-sysmeta-quota-bytes-policy-1': '10',
'x-account-meta-quota-bytes': '1000'})
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a', method=method, environ={
'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.headers.get(
'X-Account-Meta-Quota-Bytes'), '1000')
self.assertEqual(res.headers.get(
'X-Account-Sysmeta-Quota-Bytes-Policy-1'), '10')
self.assertEqual(res.headers.get(
'X-Account-Quota-Bytes-Policy-Unu'), '10')
self.assertEqual(res.headers.get(
'X-Account-Storage-Policy-Unu-Bytes-Used'), '100')
do_test('GET')
do_test('HEAD')
def test_exceed_quota_not_authorized(self):
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '1000',
'x-account-meta-quota-bytes': '0'})
app = FakeAuthFilter(account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'x-auth-token': 'bad-secret'},
environ={'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 403)
def test_exceed_quota_authorized(self):
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '1000',
'x-account-meta-quota-bytes': '0'})
app = FakeAuthFilter(account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'x-auth-token': 'secret'},
environ={'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 413)
def test_under_quota_not_authorized(self):
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '0',
'x-account-meta-quota-bytes': '1000'})
app = FakeAuthFilter(account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'x-auth-token': 'bad-secret'},
environ={'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 403)
def test_under_quota_authorized(self):
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '0',
'x-account-meta-quota-bytes': '1000'})
app = FakeAuthFilter(account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'x-auth-token': 'secret'},
environ={'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_exceed_quota_bytes_on_empty_account_not_authorized(self):
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '0',
'x-account-meta-quota-bytes': '10'})
app = FakeAuthFilter(account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'x-auth-token': 'secret',
'content-length': '100'},
environ={'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_exceed_quota_bytes_not_authorized(self):
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '100',
'x-account-meta-quota-bytes': '1000'})
app = FakeAuthFilter(account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'x-auth-token': 'secret',
'content-length': '901'},
environ={'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_over_quota_container_create_still_works(self):
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '1001',
'x-account-meta-quota-bytes': '1000'})
self.app.register('PUT', '/v1/a/new_container', HTTPOk, {})
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/new_container',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_CONTAINER_META_BERT': 'ernie',
'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_over_quota_container_post_still_works(self):
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '1001',
'x-account-meta-quota-bytes': '1000'})
self.app.register('POST', '/v1/a/new_container', HTTPOk, {})
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/new_container',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_BERT': 'ernie',
'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_over_quota_obj_post_still_works(self):
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '1001',
'x-account-meta-quota-bytes': '1000'})
self.app.register('POST', '/v1/a/c/o', HTTPOk, {})
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_OBJECT_META_BERT': 'ernie',
'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_exceed_bytes_quota_reseller(self):
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '1000',
'x-account-meta-quota-bytes': '0'})
self.app.register('PUT', '/v1/a', HTTPOk, {})
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache,
'reseller_request': True})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_exceed_bytes_quota_reseller_copy_from(self):
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '500',
'x-account-meta-quota-bytes': '1000'})
self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {
'content-length': '1000'}, b'a' * 1000)
app = copy.filter_factory({})(
account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache,
'reseller_request': True},
headers={'x-copy-from': 'c2/o2'})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_exceed_bytes_quota_reseller_copy_verb(self):
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '500',
'x-account-meta-quota-bytes': '1000'})
self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {
'content-length': '1000'}, b'a' * 1000)
app = copy.filter_factory({})(
account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
'swift.cache': cache,
'reseller_request': True},
headers={'Destination': 'c/o'})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_bad_application_quota(self):
self.app.register('PUT', '/v1/a/c/o', HTTPNotFound, {})
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 404)
def test_no_info_quota(self):
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_not_exceed_bytes_quota(self):
self.app.register('HEAD', '/v1/a', HTTPOk, {
'x-account-bytes-used': '1000',
'x-account-meta-quota-bytes': '2000'})
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_invalid_quotas(self):
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a',
environ={'REQUEST_METHOD': 'POST',
'swift.cache': cache,
'HTTP_X_ACCOUNT_META_QUOTA_BYTES': 'abc',
'reseller_request': True})
res = req.get_response(app)
self.assertEqual(res.status_int, 400)
self.assertEqual(self.app.calls, [])
@patch_policies
def test_invalid_policy_quota(self):
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a', environ={
'REQUEST_METHOD': 'POST',
'swift.cache': cache,
'HTTP_X_ACCOUNT_QUOTA_BYTES_POLICY_NULO': 'abc',
'reseller_request': True})
res = req.get_response(app)
self.assertEqual(res.status_int, 400)
self.assertEqual(self.app.calls, [])
def test_valid_quotas_admin(self):
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a',
environ={'REQUEST_METHOD': 'POST',
'swift.cache': cache,
'HTTP_X_ACCOUNT_META_QUOTA_BYTES': '100'})
res = req.get_response(app)
self.assertEqual(res.status_int, 403)
self.assertEqual(self.app.calls, [])
@patch_policies
def test_valid_policy_quota_admin(self):
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a', environ={
'REQUEST_METHOD': 'POST',
'swift.cache': cache,
'HTTP_X_ACCOUNT_QUOTA_BYTES_POLICY_UNU': '100'})
res = req.get_response(app)
self.assertEqual(res.status_int, 403)
self.assertEqual(self.app.calls, [])
def test_valid_quotas_reseller(self):
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a',
environ={'REQUEST_METHOD': 'POST',
'swift.cache': cache,
'HTTP_X_ACCOUNT_META_QUOTA_BYTES': '100',
'reseller_request': True})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
self.assertEqual(self.app.calls_with_headers, [
('POST', '/v1/a', {'Host': 'localhost:80',
'X-Account-Meta-Quota-Bytes': '100'})])
@patch_policies
def test_valid_policy_quota_reseller(self):
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a', environ={
'REQUEST_METHOD': 'POST',
'swift.cache': cache,
'HTTP_X_ACCOUNT_QUOTA_BYTES_POLICY_NULO': '100',
'reseller_request': True})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
self.assertEqual(self.app.calls_with_headers, [
('POST', '/v1/a', {
'Host': 'localhost:80',
'X-Account-Sysmeta-Quota-Bytes-Policy-0': '100'})])
def test_delete_quotas(self):
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a',
environ={'REQUEST_METHOD': 'POST',
'swift.cache': cache,
'HTTP_X_ACCOUNT_META_QUOTA_BYTES': ''})
res = req.get_response(app)
self.assertEqual(res.status_int, 403)
def test_delete_quotas_with_remove_header(self):
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a', environ={
'REQUEST_METHOD': 'POST',
'swift.cache': cache,
'HTTP_X_REMOVE_ACCOUNT_META_QUOTA_BYTES': 'True'})
res = req.get_response(app)
self.assertEqual(res.status_int, 403)
def test_delete_quotas_reseller(self):
app = account_quotas.AccountQuotaMiddleware(self.app)
req = Request.blank('/v1/a',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_ACCOUNT_META_QUOTA_BYTES': '',
'reseller_request': True})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_delete_quotas_with_remove_header_reseller(self):
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a', environ={
'REQUEST_METHOD': 'POST',
'swift.cache': cache,
'HTTP_X_REMOVE_ACCOUNT_META_QUOTA_BYTES': 'True',
'reseller_request': True})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_invalid_request_exception(self):
self.app.register('PUT', '/v1', HTTPServiceUnavailable, {})
app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache})
res = req.get_response(app)
self.assertEqual(res.status_int, 503)
class AccountQuotaCopyingTestCases(unittest.TestCase):
def setUp(self):
self.headers = []
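        # self.headers is registered below by reference, so individual tests
        # can swap the account HEAD response headers in place with
        # self.headers[:] = [...]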
self.app = FakeSwift()
self.app.register('HEAD', '/v1/a', HTTPOk, self.headers)
self.app.register('HEAD', '/v1/a/c', HTTPOk, {
'x-backend-storage-policy-index': '1'})
self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {
'content-length': '1000'})
self.aq_filter = account_quotas.filter_factory({})(self.app)
self.copy_filter = copy.filter_factory({})(self.aq_filter)
def test_exceed_bytes_quota_copy_from(self):
self.headers[:] = [('x-account-bytes-used', '500'),
('x-account-meta-quota-bytes', '1000')]
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache},
headers={'x-copy-from': '/c2/o2'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_exceed_bytes_quota_copy_verb(self):
self.headers[:] = [('x-account-bytes-used', '500'),
('x-account-meta-quota-bytes', '1000')]
cache = FakeCache(None)
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
'swift.cache': cache},
headers={'Destination': '/c/o'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_not_exceed_bytes_quota_copy_from(self):
self.app.register('PUT', '/v1/a/c/o', HTTPOk, {})
self.headers[:] = [('x-account-bytes-used', '0'),
('x-account-meta-quota-bytes', '1000')]
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache},
headers={'x-copy-from': '/c2/o2'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 200)
def test_not_exceed_bytes_quota_copy_verb(self):
self.app.register('PUT', '/v1/a/c/o', HTTPOk, {})
self.headers[:] = [('x-account-bytes-used', '0'),
('x-account-meta-quota-bytes', '1000')]
cache = FakeCache(None)
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
'swift.cache': cache},
headers={'Destination': '/c/o'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 200)
def test_quota_copy_from_bad_src(self):
self.headers[:] = [('x-account-bytes-used', '0'),
('x-account-meta-quota-bytes', '1000')]
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache},
headers={'x-copy-from': 'bad_path'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 412)
self.headers[:] = [('x-account-bytes-used', '1000'),
('x-account-meta-quota-bytes', '0')]
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 412)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/middleware/test_account_quotas.py |
# coding: utf-8
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import shutil
import tempfile
from textwrap import dedent
import time
import unittest
from swift.common import swob
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.middleware import dlo
from swift.common.utils import closing_if_possible, md5
from test.unit.common.middleware.helpers import FakeSwift
LIMIT = 'swift.common.constraints.CONTAINER_LISTING_LIMIT'
def md5hex(s):
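    # hex MD5 helper used for segment etags; accepts str or bytes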
if not isinstance(s, bytes):
s = s.encode('utf-8')
return md5(s, usedforsecurity=False).hexdigest()
class DloTestCase(unittest.TestCase):
def call_dlo(self, req, app=None):
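        # run the request through the DLO-filtered app and return
        # (status, headers, body) with the response iterator fully consumed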
if app is None:
app = self.dlo
req.headers.setdefault("User-Agent", "Soap Opera")
status = [None]
headers = [None]
def start_response(s, h, ei=None):
status[0] = s
headers[0] = h
body_iter = app(req.environ, start_response)
body = b''
# appease the close-checker
with closing_if_possible(body_iter):
for chunk in body_iter:
body += chunk
return status[0], headers[0], body
def setUp(self):
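        # wire a FakeSwift backend behind the DLO filter and register five
        # 5-byte segments plus the manifests and container listings that the
        # tests reference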
self.app = FakeSwift()
self.dlo = dlo.filter_factory({
# don't slow down tests with rate limiting
'rate_limit_after_segment': '1000000',
})(self.app)
self.dlo.logger = self.app.logger
self.app.register(
'GET', '/v1/AUTH_test/c/seg_01',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("aaaaa")},
b'aaaaa')
self.app.register(
'GET', '/v1/AUTH_test/c/seg_02',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("bbbbb")},
b'bbbbb')
self.app.register(
'GET', '/v1/AUTH_test/c/seg_03',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("ccccc")},
b'ccccc')
self.app.register(
'GET', '/v1/AUTH_test/c/seg_04',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("ddddd")},
b'ddddd')
self.app.register(
'GET', '/v1/AUTH_test/c/seg_05',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("eeeee")},
b'eeeee')
# an unrelated object (not seg*) to test the prefix matching
self.app.register(
'GET', '/v1/AUTH_test/c/catpicture.jpg',
swob.HTTPOk, {'Content-Length': '9',
'Etag': md5hex("meow meow meow meow")},
b'meow meow meow meow')
self.app.register(
'GET', '/v1/AUTH_test/mancon/manifest',
swob.HTTPOk, {'Content-Length': '17', 'Etag': 'manifest-etag',
'X-Object-Manifest': 'c/seg'},
b'manifest-contents')
lm = '2013-11-22T02:42:13.781760'
ct = 'application/octet-stream'
segs = [{"hash": md5hex("aaaaa"), "bytes": 5,
"name": "seg_01", "last_modified": lm, "content_type": ct},
{"hash": md5hex("bbbbb"), "bytes": 5,
"name": "seg_02", "last_modified": lm, "content_type": ct},
{"hash": md5hex("ccccc"), "bytes": 5,
"name": "seg_03", "last_modified": lm, "content_type": ct},
{"hash": md5hex("ddddd"), "bytes": 5,
"name": "seg_04", "last_modified": lm, "content_type": ct},
{"hash": md5hex("eeeee"), "bytes": 5,
"name": "seg_05", "last_modified": lm, "content_type": ct}]
full_container_listing = segs + [{"hash": "cats-etag", "bytes": 9,
"name": "catpicture.jpg",
"last_modified": lm,
"content_type": "application/png"}]
self.app.register(
'GET', '/v1/AUTH_test/c',
swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'},
json.dumps(full_container_listing).encode('ascii'))
self.app.register(
'GET', '/v1/AUTH_test/c?prefix=seg',
swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'},
json.dumps(segs).encode('ascii'))
# This is to let us test multi-page container listings; we use the
# trailing underscore to send small (pagesize=3) listings.
#
# If you're testing against this, be sure to mock out
# CONTAINER_LISTING_LIMIT to 3 in your test.
self.app.register(
'GET', '/v1/AUTH_test/mancon/manifest-many-segments',
swob.HTTPOk, {'Content-Length': '7', 'Etag': 'etag-manyseg',
'X-Object-Manifest': 'c/seg_'},
b'manyseg')
self.app.register(
'GET', '/v1/AUTH_test/c?prefix=seg_',
swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'},
json.dumps(segs[:3]).encode('ascii'))
self.app.register(
'GET', '/v1/AUTH_test/c?prefix=seg_&marker=seg_03',
swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'},
json.dumps(segs[3:]).encode('ascii'))
# Here's a manifest with 0 segments
self.app.register(
'GET', '/v1/AUTH_test/mancon/manifest-no-segments',
swob.HTTPOk, {'Content-Length': '7', 'Etag': 'noseg',
'X-Object-Manifest': 'c/noseg_'},
b'noseg')
self.app.register(
'GET', '/v1/AUTH_test/c?prefix=noseg_',
swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'},
json.dumps([]).encode('ascii'))
class TestDloPutManifest(DloTestCase):
def setUp(self):
super(TestDloPutManifest, self).setUp()
self.app.register(
'PUT', '/v1/AUTH_test/c/m',
swob.HTTPCreated, {}, None)
def test_validating_x_object_manifest(self):
exp_okay = ["c/o",
"c/obj/with/slashes",
"c/obj/with/trailing/slash/",
"c/obj/with//multiple///slashes////adjacent"]
exp_bad = ["",
"/leading/slash",
"double//slash",
"container-only",
"whole-container/",
"c/o?short=querystring",
"c/o?has=a&long-query=string"]
got_okay = []
got_bad = []
for val in (exp_okay + exp_bad):
req = swob.Request.blank("/v1/AUTH_test/c/m",
environ={'REQUEST_METHOD': 'PUT'},
headers={"X-Object-Manifest": val})
status, _, _ = self.call_dlo(req)
if status.startswith("201"):
got_okay.append(val)
else:
got_bad.append(val)
self.assertEqual(exp_okay, got_okay)
self.assertEqual(exp_bad, got_bad)
def test_validation_watches_manifests_with_slashes(self):
self.app.register(
'PUT', '/v1/AUTH_test/con/w/x/y/z',
swob.HTTPCreated, {}, None)
req = swob.Request.blank(
"/v1/AUTH_test/con/w/x/y/z", environ={'REQUEST_METHOD': 'PUT'},
headers={"X-Object-Manifest": 'good/value'})
status, _, _ = self.call_dlo(req)
self.assertEqual(status, "201 Created")
req = swob.Request.blank(
"/v1/AUTH_test/con/w/x/y/z", environ={'REQUEST_METHOD': 'PUT'},
headers={"X-Object-Manifest": '/badvalue'})
status, _, _ = self.call_dlo(req)
self.assertEqual(status, "400 Bad Request")
def test_validation_ignores_containers(self):
self.app.register(
'PUT', '/v1/a/c',
swob.HTTPAccepted, {}, None)
req = swob.Request.blank(
"/v1/a/c", environ={'REQUEST_METHOD': 'PUT'},
headers={"X-Object-Manifest": "/superbogus/?wrong=in&every=way"})
status, _, _ = self.call_dlo(req)
self.assertEqual(status, "202 Accepted")
def test_validation_ignores_accounts(self):
self.app.register(
'PUT', '/v1/a',
swob.HTTPAccepted, {}, None)
req = swob.Request.blank(
"/v1/a", environ={'REQUEST_METHOD': 'PUT'},
headers={"X-Object-Manifest": "/superbogus/?wrong=in&every=way"})
status, _, _ = self.call_dlo(req)
self.assertEqual(status, "202 Accepted")
class TestDloHeadManifest(DloTestCase):
def test_head_large_object(self):
expected_etag = '"%s"' % md5hex(
md5hex("aaaaa") + md5hex("bbbbb") + md5hex("ccccc") +
md5hex("ddddd") + md5hex("eeeee"))
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'HEAD'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(headers["Etag"], expected_etag)
self.assertEqual(headers["Content-Length"], "25")
def test_head_large_object_too_many_segments(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments',
environ={'REQUEST_METHOD': 'HEAD'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
# etag is manifest's etag
self.assertEqual(headers["Etag"], "etag-manyseg")
self.assertIsNone(headers.get("Content-Length"))
def test_head_large_object_no_segments(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-no-segments',
environ={'REQUEST_METHOD': 'HEAD'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(headers["Etag"], '"%s"' % md5hex(""))
self.assertEqual(headers["Content-Length"], "0")
# one request to HEAD the manifest
# one request for the first page of listings
# *zero* requests for the second page of listings
self.assertEqual(
self.app.calls,
[('HEAD', '/v1/AUTH_test/mancon/manifest-no-segments'),
('GET', '/v1/AUTH_test/c?prefix=noseg_')])
class TestDloGetManifest(DloTestCase):
def tearDown(self):
self.assertEqual(self.app.unclosed_requests, {})
def test_get_manifest(self):
expected_etag = '"%s"' % md5hex(
md5hex("aaaaa") + md5hex("bbbbb") + md5hex("ccccc") +
md5hex("ddddd") + md5hex("eeeee"))
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(headers["Etag"], expected_etag)
self.assertEqual(headers["Content-Length"], "25")
self.assertEqual(body, b'aaaaabbbbbcccccdddddeeeee')
for _, _, hdrs in self.app.calls_with_headers[1:]:
ua = hdrs.get("User-Agent", "")
self.assertTrue("DLO MultipartGET" in ua)
self.assertFalse("DLO MultipartGET DLO MultipartGET" in ua)
# the first request goes through unaltered
self.assertFalse(
"DLO MultipartGET" in self.app.calls_with_headers[0][2])
# we set swift.source for everything but the first request
self.assertEqual(self.app.swift_sources,
[None, 'DLO', 'DLO', 'DLO', 'DLO', 'DLO', 'DLO'])
def test_get_non_manifest_passthrough(self):
req = swob.Request.blank('/v1/AUTH_test/c/catpicture.jpg',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
self.assertEqual(body, b"meow meow meow meow")
def test_get_non_object_passthrough(self):
self.app.register('GET', '/info', swob.HTTPOk,
{}, 'useful stuff here')
req = swob.Request.blank('/info',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
self.assertEqual(status, '200 OK')
self.assertEqual(body, b'useful stuff here')
self.assertEqual(self.app.call_count, 1)
self.assertFalse(self.app.unread_requests)
def test_get_manifest_passthrough(self):
# reregister it with the query param
self.app.register(
'GET', '/v1/AUTH_test/mancon/manifest?multipart-manifest=get',
swob.HTTPOk, {'Content-Length': '17', 'Etag': 'manifest-etag',
'X-Object-Manifest': 'c/seg'},
'manifest-contents')
req = swob.Request.blank(
'/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'multipart-manifest=get'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(headers["Etag"], "manifest-etag")
self.assertEqual(body, b'manifest-contents')
self.assertFalse(self.app.unread_requests)
# HEAD query param worked, since GET with query param is registered
req = swob.Request.blank(
'/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'HEAD',
'QUERY_STRING': 'multipart-manifest=get'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(headers["Etag"], "manifest-etag")
self.assertEqual(body, b'')
def test_error_passthrough(self):
self.app.register(
'GET', '/v1/AUTH_test/gone/404ed',
swob.HTTPNotFound, {}, None)
req = swob.Request.blank('/v1/AUTH_test/gone/404ed',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
self.assertEqual(status, '404 Not Found')
# ... and multipart-manifest=get also returns registered 404 response
req = swob.Request.blank('/v1/AUTH_test/gone/404ed',
method='GET',
params={'multipart-manifest': 'get'})
status, headers, body = self.call_dlo(req)
self.assertEqual(status, '404 Not Found')
        # HEAD with same params finds the same registered GET
req = swob.Request.blank('/v1/AUTH_test/gone/404ed',
method='HEAD',
params={'multipart-manifest': 'get'})
status, headers, body = self.call_dlo(req)
self.assertEqual(status, '404 Not Found')
def test_get_range(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=8-17'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "10")
self.assertEqual(body, b'bbcccccddd')
expected_etag = '"%s"' % md5hex(
md5hex("aaaaa") + md5hex("bbbbb") + md5hex("ccccc") +
md5hex("ddddd") + md5hex("eeeee"))
self.assertEqual(headers.get("Etag"), expected_etag)
self.assertEqual(self.app.unread_requests, {})
def test_get_big_manifest(self):
self.app.register(
'GET', '/v1/AUTH_test/mancon/big-manifest',
swob.HTTPOk, {'Content-Length': '17000', 'Etag': 'manifest-etag',
'X-Object-Manifest': 'c/seg'},
b'manifest-contents' * 1000)
req = swob.Request.blank('/v1/AUTH_test/mancon/big-manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "200 OK")
self.assertEqual(headers["Content-Length"], "25")
self.assertEqual(body, b'aaaaabbbbbcccccdddddeeeee')
expected_etag = '"%s"' % md5hex(
md5hex("aaaaa") + md5hex("bbbbb") + md5hex("ccccc") +
md5hex("ddddd") + md5hex("eeeee"))
self.assertEqual(headers.get("Etag"), expected_etag)
self.assertEqual(self.app.unread_requests, {
# Since we don't know how big this will be, we just disconnect
('GET', '/v1/AUTH_test/mancon/big-manifest'): 1,
})
def test_get_range_on_segment_boundaries(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=10-19'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "10")
self.assertEqual(body, b'cccccddddd')
def test_get_range_first_byte(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-0'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "1")
self.assertEqual(body, b'a')
def test_get_range_last_byte(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=24-24'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "1")
self.assertEqual(body, b'e')
def test_get_range_overlapping_end(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=18-30'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "7")
self.assertEqual(headers["Content-Range"], "bytes 18-24/25")
self.assertEqual(body, b'ddeeeee')
def test_get_range_unsatisfiable(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=25-30'})
status, headers, body = self.call_dlo(req)
self.assertEqual(status, "416 Requested Range Not Satisfiable")
expected_headers = (
('Accept-Ranges', 'bytes'),
('Content-Range', 'bytes */25'),
)
for header_value_pair in expected_headers:
self.assertIn(header_value_pair, headers)
def test_get_range_many_segments_satisfiable(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=3-12'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "10")
# The /15 here indicates that this is a 15-byte object. DLO can't tell
# if there are more segments or not without fetching more container
# listings, though, so we just go with the sum of the lengths of the
# segments we can see. In an ideal world, this would be "bytes 3-12/*"
# to indicate that we don't know the full object length. However, RFC
# 2616 section 14.16 explicitly forbids us from doing that:
#
# A response with status code 206 (Partial Content) MUST NOT include
# a Content-Range field with a byte-range-resp-spec of "*".
#
# Since the truth is forbidden, we lie.
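        # Worked example (a sketch of the arithmetic, not extra behavior):
        # with the listing limit patched to 3, only seg_01..seg_03 (5 bytes
        # each) are visible, so the advertised total is 15.  bytes=3-12 thus
        # covers the last 2 bytes of seg_01, all of seg_02, and the first 3
        # bytes of seg_03 -- "aa" + "bbbbb" + "ccc", 10 bytes, as asserted
        # below.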
self.assertEqual(headers["Content-Range"], "bytes 3-12/15")
self.assertEqual(body, b"aabbbbbccc")
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/mancon/manifest-many-segments'),
('GET', '/v1/AUTH_test/c?prefix=seg_'),
('GET', '/v1/AUTH_test/c/seg_01?multipart-manifest=get'),
('GET', '/v1/AUTH_test/c/seg_02?multipart-manifest=get'),
('GET', '/v1/AUTH_test/c/seg_03?multipart-manifest=get')])
def test_get_range_many_segments_satisfiability_unknown(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=10-22'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "200 OK")
# this requires multiple pages of container listing, so we can't send
# a Content-Length header
self.assertIsNone(headers.get("Content-Length"))
self.assertEqual(body, b"aaaaabbbbbcccccdddddeeeee")
def test_get_suffix_range(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=-40'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "206 Partial Content")
self.assertEqual(headers["Content-Length"], "25")
self.assertEqual(body, b"aaaaabbbbbcccccdddddeeeee")
def test_get_suffix_range_many_segments(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=-5'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "200 OK")
self.assertIsNone(headers.get("Content-Length"))
self.assertIsNone(headers.get("Content-Range"))
self.assertEqual(body, b"aaaaabbbbbcccccdddddeeeee")
def test_get_multi_range(self):
# DLO doesn't support multi-range GETs. The way that you express that
# in HTTP is to return a 200 response containing the whole entity.
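        # (Honoring both ranges would require a multipart/byteranges reply;
        # answering 200 with the complete entity is the simpler, permitted
        # fallback that this test expects.)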
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=5-9,15-19'})
with mock.patch(LIMIT, 30):
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "200 OK")
self.assertEqual(headers.get("Content-Length"), '25')
self.assertIsNone(headers.get("Content-Range"))
self.assertEqual(body, b'aaaaabbbbbcccccdddddeeeee')
def test_if_match_matches(self):
manifest_etag = '"%s"' % md5hex(
md5hex("aaaaa") + md5hex("bbbbb") + md5hex("ccccc") +
md5hex("ddddd") + md5hex("eeeee"))
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': manifest_etag})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '25')
self.assertEqual(body, b'aaaaabbbbbcccccdddddeeeee')
def test_if_match_does_not_match(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Match': 'not it'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '412 Precondition Failed')
self.assertEqual(headers['Content-Length'], '0')
self.assertEqual(body, b'')
def test_if_none_match_matches(self):
manifest_etag = '"%s"' % md5hex(
md5hex("aaaaa") + md5hex("bbbbb") + md5hex("ccccc") +
md5hex("ddddd") + md5hex("eeeee"))
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': manifest_etag})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '304 Not Modified')
self.assertEqual(headers['Content-Length'], '0')
self.assertEqual(body, b'')
def test_if_none_match_does_not_match(self):
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-None-Match': 'not it'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK')
self.assertEqual(headers['Content-Length'], '25')
self.assertEqual(body, b'aaaaabbbbbcccccdddddeeeee')
def test_get_with_if_modified_since(self):
# It's important not to pass the If-[Un]Modified-Since header to the
# proxy for segment GET requests, as it may result in 304 Not Modified
# responses, and those don't contain segment data.
req = swob.Request.blank(
'/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': 'Wed, 12 Feb 2014 22:24:52 GMT',
'If-Unmodified-Since': 'Thu, 13 Feb 2014 23:25:53 GMT'})
status, headers, body = self.call_dlo(req)
for _, _, hdrs in self.app.calls_with_headers[1:]:
self.assertFalse('If-Modified-Since' in hdrs)
self.assertFalse('If-Unmodified-Since' in hdrs)
def test_server_error_fetching_first_segment(self):
self.app.register(
'GET', '/v1/AUTH_test/c/seg_01',
swob.HTTPServiceUnavailable, {}, None)
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
self.assertEqual(status, "503 Service Unavailable")
self.assertEqual(self.app.unread_requests, {})
self.assertEqual(self.dlo.logger.get_lines_for_level('error'), [
'While processing manifest /v1/AUTH_test/mancon/manifest, '
'got 503 (<html><h1>Service Unavailable</h1><p>The server is '
'curren...) while retrieving /v1/AUTH_test/c/seg_01',
])
def test_client_error_fetching_first_segment(self):
self.app.register(
'GET', '/v1/AUTH_test/c/seg_01',
swob.HTTPForbidden, {}, None)
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
self.assertEqual(status, "409 Conflict")
self.assertEqual(self.app.unread_requests, {})
self.assertEqual(self.dlo.logger.get_lines_for_level('error'), [
'While processing manifest /v1/AUTH_test/mancon/manifest, '
'got 403 (<html><h1>Forbidden</h1><p>Access was denied to this '
'reso...) while retrieving /v1/AUTH_test/c/seg_01',
])
def test_error_fetching_second_segment(self):
self.app.register(
'GET', '/v1/AUTH_test/c/seg_02',
swob.HTTPForbidden, {}, None)
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "200 OK")
# first segment made it out
self.assertEqual(body, b'aaaaa')
self.assertEqual(self.app.unread_requests, {})
self.assertEqual(self.dlo.logger.get_lines_for_level('error'), [
'While processing manifest /v1/AUTH_test/mancon/manifest, '
'got 403 (<html><h1>Forbidden</h1><p>Access was denied to this '
'reso...) while retrieving /v1/AUTH_test/c/seg_02',
])
def test_error_listing_container_first_listing_request(self):
self.app.register(
'GET', '/v1/AUTH_test/c?prefix=seg_',
swob.HTTPNotFound, {}, None)
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=-5'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
self.assertEqual(status, "404 Not Found")
def test_error_listing_container_second_listing_request(self):
self.app.register(
'GET', '/v1/AUTH_test/c?prefix=seg_&marker=seg_03',
swob.HTTPNotFound, {}, None)
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=-5'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
self.assertEqual(status, "200 OK")
self.assertEqual(body, b'aaaaabbbbbccccc')
def test_error_listing_container_HEAD(self):
self.app.register(
'GET', '/v1/AUTH_test/c?prefix=seg_',
# for example, if a manifest refers to segments in another
# container, but the user is accessing the manifest via a
# container-level tempurl key
swob.HTTPUnauthorized, {}, None)
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments',
environ={'REQUEST_METHOD': 'HEAD'})
with mock.patch(LIMIT, 3):
status, headers, body = self.call_dlo(req)
self.assertEqual(status, "401 Unauthorized")
self.assertEqual(body, b"")
def test_mismatched_etag_fetching_second_segment(self):
self.app.register(
'GET', '/v1/AUTH_test/c/seg_02',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("bbbbb")},
'WRONG')
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "200 OK")
self.assertEqual(headers['Content-Length'], "25")
# stop after error
self.assertEqual(body, b"aaaaaWRONG")
log_lines = self.dlo.logger.get_lines_for_level('error')
self.assertEqual(len(log_lines), 1,
'Expected one log line, got %r' % log_lines)
self.assertEqual(log_lines[0][:21], 'Bad MD5 checksum for ')
def test_mismatched_length_fetching_second_segment(self):
self.app.register(
'GET', '/v1/AUTH_test/c/seg_02',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("bbbb")},
            # Use a list so we can get a discrepancy between content-length and
# number of bytes in the app_iter
[b'b' * 4])
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, "200 OK")
self.assertEqual(headers['Content-Length'], "25")
# stop after error
self.assertEqual(body, b"aaaaabbbb")
log_lines = self.dlo.logger.get_lines_for_level('error')
self.assertEqual(len(log_lines), 1,
'Expected one log line, got %r' % log_lines)
self.assertEqual(log_lines[0][:24], 'Bad response length for ')
def test_etag_comparison_ignores_quotes(self):
# a little future-proofing here in case we ever fix this in swob
self.app.register(
'HEAD', '/v1/AUTH_test/mani/festo',
swob.HTTPOk, {'Content-Length': '0', 'Etag': 'blah',
'X-Object-Manifest': 'c/quotetags'}, None)
self.app.register(
'GET', '/v1/AUTH_test/c?prefix=quotetags',
swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'},
json.dumps([{"hash": "\"abc\"", "bytes": 5, "name": "quotetags1",
"last_modified": "2013-11-22T02:42:14.261620",
"content-type": "application/octet-stream"},
{"hash": "def", "bytes": 5, "name": "quotetags2",
"last_modified": "2013-11-22T02:42:14.261620",
"content-type": "application/octet-stream"}]))
req = swob.Request.blank('/v1/AUTH_test/mani/festo',
environ={'REQUEST_METHOD': 'HEAD'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(headers["Etag"],
'"' + md5(b"abcdef",
usedforsecurity=False).hexdigest() + '"')
def test_object_prefix_quoting(self):
self.app.register(
'GET', '/v1/AUTH_test/man/accent',
swob.HTTPOk, {'Content-Length': '0', 'Etag': 'blah',
'X-Object-Manifest': u'c/é'.encode('utf-8')}, None)
segs = [{"hash": md5hex("AAAAA"), "bytes": 5, "name": u"é1"},
{"hash": md5hex("AAAAA"), "bytes": 5, "name": u"é2"}]
self.app.register(
'GET', '/v1/AUTH_test/c?prefix=%C3%A9',
swob.HTTPOk, {'Content-Type': 'application/json'},
json.dumps(segs).encode('ascii'))
# NB: wsgi string
path = '/v1/AUTH_test/c/\xC3\xa9'
self.app.register(
'GET', path + '1',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("AAAAA")},
b"AAAAA")
self.app.register(
'GET', path + '2',
swob.HTTPOk, {'Content-Length': '5', 'Etag': md5hex("BBBBB")},
b"BBBBB")
req = swob.Request.blank('/v1/AUTH_test/man/accent',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
self.assertEqual(status, "200 OK")
self.assertEqual(body, b'AAAAABBBBB')
def test_get_taking_too_long(self):
the_time = [time.time()]
def mock_time():
return the_time[0]
# this is just a convenient place to hang a time jump
def mock_is_success(status_int):
the_time[0] += 9 * 3600
return status_int // 100 == 2
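        # Each successful segment response above also jumps the clock ahead
        # nine hours.  Assuming the default max_get_time of 86400 seconds
        # (one day) is in effect, the limit is exceeded after the third
        # segment, so only 'aaaaa', 'bbbbb' and 'ccccc' reach the body below.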
req = swob.Request.blank(
'/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
with mock.patch('swift.common.request_helpers.time.time',
mock_time), \
mock.patch('swift.common.request_helpers.is_success',
mock_is_success), \
mock.patch.object(dlo, 'is_success', mock_is_success):
status, headers, body = self.call_dlo(req)
self.assertEqual(status, '200 OK')
self.assertEqual(body, b'aaaaabbbbbccccc')
def test_get_oversize_segment(self):
# If we send a Content-Length header to the client, it's based on the
# container listing. If a segment gets bigger by the time we get to it
# (like if a client uploads a bigger segment w/the same name), we need
# to not send anything beyond the length we promised. Also, we should
# probably raise an exception.
# This is now longer than the original seg_03+seg_04+seg_05 combined
self.app.register(
'GET', '/v1/AUTH_test/c/seg_03',
swob.HTTPOk, {'Content-Length': '20', 'Etag': 'seg03-etag'},
'cccccccccccccccccccc')
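        # Expected arithmetic (sketch): the container listing still promises
        # 5 bytes for seg_03, so the manifest's Content-Length stays 25.
        # After 'aaaaa' and 'bbbbb' (10 bytes), only 15 of the oversized
        # segment's 20 bytes are emitted and seg_04/seg_05 are never fetched,
        # matching the body and call list asserted below.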
req = swob.Request.blank(
'/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK') # sanity check
self.assertEqual(headers.get('Content-Length'), '25') # sanity check
self.assertEqual(body, b'aaaaabbbbbccccccccccccccc')
self.assertEqual(
self.app.calls,
[('GET', '/v1/AUTH_test/mancon/manifest'),
('GET', '/v1/AUTH_test/c?prefix=seg'),
('GET', '/v1/AUTH_test/c/seg_01?multipart-manifest=get'),
('GET', '/v1/AUTH_test/c/seg_02?multipart-manifest=get'),
('GET', '/v1/AUTH_test/c/seg_03?multipart-manifest=get')])
def test_get_undersize_segment(self):
# If we send a Content-Length header to the client, it's based on the
# container listing. If a segment gets smaller by the time we get to
# it (like if a client uploads a smaller segment w/the same name), we
# need to raise an exception so that the connection will be closed by
# the WSGI server. Otherwise, the WSGI server will be waiting for the
# next request, the client will still be waiting for the rest of the
# response, and nobody will be happy.
# Shrink it by a single byte
self.app.register(
'GET', '/v1/AUTH_test/c/seg_03',
swob.HTTPOk, {'Content-Length': '4', 'Etag': md5hex("cccc")},
'cccc')
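        # Expected arithmetic (sketch): the advertised Content-Length is
        # still 25, but the body below is only 24 bytes
        # ('aaaaa' + 'bbbbb' + 'cccc' + 'ddddd' + 'eeeee'), which is what
        # forces the WSGI server to close the connection instead of reusing
        # it.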
req = swob.Request.blank(
'/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '200 OK') # sanity check
self.assertEqual(headers.get('Content-Length'), '25') # sanity check
self.assertEqual(body, b'aaaaabbbbbccccdddddeeeee')
def test_get_undersize_segment_range(self):
# Shrink it by a single byte
self.app.register(
'GET', '/v1/AUTH_test/c/seg_03',
swob.HTTPOk, {'Content-Length': '4', 'Etag': md5hex("cccc")},
'cccc')
req = swob.Request.blank(
'/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-14'})
status, headers, body = self.call_dlo(req)
headers = HeaderKeyDict(headers)
self.assertEqual(status, '206 Partial Content') # sanity check
self.assertEqual(headers.get('Content-Length'), '15') # sanity check
self.assertEqual(body, b'aaaaabbbbbcccc')
def test_get_with_auth_overridden(self):
auth_got_called = [0]
def my_auth(req):
auth_got_called[0] += 1
return None
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
environ={'REQUEST_METHOD': 'GET',
'swift.authorize': my_auth})
status, headers, body = self.call_dlo(req)
self.assertTrue(auth_got_called[0] > 1)
class TestDloConfiguration(unittest.TestCase):
"""
For backwards compatibility, we will read a couple of values out of the
proxy's config section if we don't have any config values.
"""
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_skip_defaults_if_configured(self):
# The presence of even one config value in our config section means we
# won't go looking for the proxy config at all.
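        # Because max_get_time is passed straight to filter_factory below,
        # the rate-limit values in the proxy section (7 and 13) are ignored
        # and the middleware keeps its built-in defaults (1 and 10), per the
        # assertions at the end of this test.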
proxy_conf = dedent("""
[DEFAULT]
bind_ip = 10.4.5.6
[pipeline:main]
pipeline = catch_errors dlo ye-olde-proxy-server
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:dlo]
use = egg:swift#dlo
max_get_time = 3600
[app:ye-olde-proxy-server]
use = egg:swift#proxy
rate_limit_segments_per_sec = 7
rate_limit_after_segment = 13
max_get_time = 2900
""")
conffile = tempfile.NamedTemporaryFile(mode='w')
conffile.write(proxy_conf)
conffile.flush()
mware = dlo.filter_factory({
'max_get_time': '3600',
'__file__': conffile.name
})("no app here")
self.assertEqual(1, mware.rate_limit_segments_per_sec)
self.assertEqual(10, mware.rate_limit_after_segment)
self.assertEqual(3600, mware.max_get_time)
conffile.close()
def test_finding_defaults_from_file(self):
# If DLO has no config vars, go pull them from the proxy server's
# config section
proxy_conf = dedent("""
[DEFAULT]
bind_ip = 10.4.5.6
[pipeline:main]
pipeline = catch_errors dlo ye-olde-proxy-server
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:dlo]
use = egg:swift#dlo
[app:ye-olde-proxy-server]
use = egg:swift#proxy
rate_limit_after_segment = 13
set max_get_time = 2900
""")
conffile = tempfile.NamedTemporaryFile(mode='w')
conffile.write(proxy_conf)
conffile.flush()
mware = dlo.filter_factory({
'__file__': conffile.name
})("no app here")
self.assertEqual(1, mware.rate_limit_segments_per_sec)
self.assertEqual(13, mware.rate_limit_after_segment)
self.assertEqual(2900, mware.max_get_time)
conffile.close()
def test_finding_defaults_from_dir(self):
# If DLO has no config vars, go pull them from the proxy server's
# config section
proxy_conf1 = dedent("""
[DEFAULT]
bind_ip = 10.4.5.6
[pipeline:main]
pipeline = catch_errors dlo ye-olde-proxy-server
""")
proxy_conf2 = dedent("""
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:dlo]
use = egg:swift#dlo
[app:ye-olde-proxy-server]
use = egg:swift#proxy
rate_limit_after_segment = 13
max_get_time = 2900
""")
conf_dir = self.tmpdir
conffile1 = tempfile.NamedTemporaryFile(mode='w',
dir=conf_dir, suffix='.conf')
conffile1.write(proxy_conf1)
conffile1.flush()
conffile2 = tempfile.NamedTemporaryFile(mode='w',
dir=conf_dir, suffix='.conf')
conffile2.write(proxy_conf2)
conffile2.flush()
mware = dlo.filter_factory({
'__file__': conf_dir
})("no app here")
self.assertEqual(1, mware.rate_limit_segments_per_sec)
self.assertEqual(13, mware.rate_limit_after_segment)
self.assertEqual(2900, mware.max_get_time)
conffile1.close()
conffile2.close()
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/middleware/test_dlo.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
from contextlib import contextmanager
import errno
import json
import mock
import os
from posix import stat_result, statvfs_result
from shutil import rmtree
import tempfile
import time
import unittest
from unittest import TestCase
from swift import __version__ as swiftver
from swift.common import ring, utils
from swift.common.recon import RECON_RELINKER_FILE, RECON_DRIVE_FILE, \
DEFAULT_RECON_CACHE_PATH, server_type_to_recon_file
from swift.common.swob import Request
from swift.common.middleware import recon
from swift.common.storage_policy import StoragePolicy
from test.debug_logger import debug_logger
from test.unit import patch_policies
def fake_check_mount(a, b):
raise OSError('Input/Output Error')
def fail_os_listdir():
raise OSError('No such file or directory')
def fail_io_open(file_path, open_mode):
raise IOError('No such file or directory')
class FakeApp(object):
def __call__(self, env, start_response):
return b"FAKE APP"
def start_response(*args):
pass
class FakeFromCache(object):
def __init__(self, out=None):
self.fakeout = out
self.fakeout_calls = []
def fake_from_recon_cache(self, *args, **kwargs):
self.fakeout_calls.append((args, kwargs))
return self.fakeout
class OpenAndReadTester(object):
def __init__(self, output_iter):
self.index = 0
self.out_len = len(output_iter) - 1
self.data = output_iter
self.output_iter = iter(output_iter)
self.read_calls = []
self.open_calls = []
def __iter__(self):
return self
def __next__(self):
if self.index == self.out_len:
raise StopIteration
else:
line = self.data[self.index]
self.index += 1
return line
next = __next__
def read(self, *args, **kwargs):
self.read_calls.append((args, kwargs))
try:
return next(self.output_iter)
except StopIteration:
return ''
@contextmanager
def open(self, *args, **kwargs):
self.open_calls.append((args, kwargs))
yield self
class MockOS(object):
def __init__(self, ls_out=None, isdir_out=None, ismount_out=False,
statvfs_out=None):
self.ls_output = ls_out
self.isdir_output = isdir_out
self.ismount_output = ismount_out
self.statvfs_output = statvfs_out
self.listdir_calls = []
self.isdir_calls = []
self.ismount_calls = []
self.statvfs_calls = []
def fake_listdir(self, *args, **kwargs):
self.listdir_calls.append((args, kwargs))
return self.ls_output
def fake_isdir(self, *args, **kwargs):
self.isdir_calls.append((args, kwargs))
return self.isdir_output
def fake_ismount(self, *args, **kwargs):
self.ismount_calls.append((args, kwargs))
if isinstance(self.ismount_output, Exception):
raise self.ismount_output
else:
return self.ismount_output
def fake_statvfs(self, *args, **kwargs):
self.statvfs_calls.append((args, kwargs))
return statvfs_result(self.statvfs_output)
class FakeRecon(object):
def __init__(self):
self.fake_replication_rtype = None
self.fake_updater_rtype = None
self.fake_auditor_rtype = None
self.fake_expirer_rtype = None
def fake_mem(self):
return {'memtest': "1"}
def fake_load(self):
return {'loadtest': "1"}
def fake_async(self):
return {'asynctest': "1"}
def fake_get_device_info(self):
return {"/srv/1/node": ["sdb1"]}
def fake_replication(self, recon_type):
self.fake_replication_rtype = recon_type
return {'replicationtest': "1"}
def fake_sharding(self):
return {"sharding_stats": "1"}
def fake_relinker(self):
return {"relinktest": "1"}
def fake_reconstruction(self):
return {'reconstructiontest': "1"}
def fake_updater(self, recon_type):
self.fake_updater_rtype = recon_type
return {'updatertest': "1"}
def fake_auditor(self, recon_type):
self.fake_auditor_rtype = recon_type
return {'auditortest': "1"}
def fake_expirer(self, recon_type):
self.fake_expirer_rtype = recon_type
return {'expirertest': "1"}
def fake_mounted(self):
return {'mountedtest': "1"}
def fake_unmounted(self):
return {'unmountedtest': "1"}
def fake_unmounted_empty(self):
return []
def fake_diskusage(self):
return {'diskusagetest': "1"}
def fake_ringmd5(self):
return {'ringmd5test': "1"}
def fake_swiftconfmd5(self):
return {'/etc/swift/swift.conf': "abcdef"}
def fake_quarantined(self):
return {'quarantinedtest': "1"}
def fake_sockstat(self):
return {'sockstattest': "1"}
def fake_driveaudit(self):
return {'driveaudittest': "1"}
def fake_time(self):
return {'timetest': "1"}
def nocontent(self):
return None
def raise_IOError(self, errno=None):
mock_obj = mock.MagicMock()
mock_obj.side_effect = IOError(errno, str(errno))
return mock_obj
def raise_ValueError(self, *args, **kwargs):
raise ValueError
def raise_Exception(self, *args, **kwargs):
raise Exception
@patch_policies(legacy_only=True)
class TestReconSuccess(TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp(prefix='swift_recon_md5_test')
utils.mkdirs(self.tempdir)
self.app = self._get_app()
self.mockos = MockOS()
self.fakecache = FakeFromCache()
self.real_listdir = os.listdir
self.real_isdir = os.path.isdir
self.real_ismount = utils.ismount
self.real_statvfs = os.statvfs
os.listdir = self.mockos.fake_listdir
os.path.isdir = self.mockos.fake_isdir
utils.ismount = self.mockos.fake_ismount
os.statvfs = self.mockos.fake_statvfs
self.real_from_cache = self.app._from_recon_cache
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
self.frecon = FakeRecon()
self.app.logger = debug_logger()
# replace hash md5 implementation of the md5_hash_for_file function
mock_hash_for_file = mock.patch(
'swift.common.middleware.recon.md5_hash_for_file',
lambda f, **kwargs: 'hash-' + os.path.basename(f))
self.addCleanup(mock_hash_for_file.stop)
mock_hash_for_file.start()
self.ring_part_shift = 5
self.ring_devs = [{'id': 0, 'zone': 0, 'weight': 1.0,
'ip': '10.1.1.1', 'port': 6200,
'device': 'sda1'},
{'id': 1, 'zone': 0, 'weight': 1.0,
'ip': '10.1.1.1', 'port': 6200,
'device': 'sdb1'},
None,
{'id': 3, 'zone': 2, 'weight': 1.0,
'ip': '10.1.2.1', 'port': 6200,
'device': 'sdc1'},
{'id': 4, 'zone': 2, 'weight': 1.0,
'ip': '10.1.2.2', 'port': 6200,
'device': 'sdd1'}]
self._create_rings()
def tearDown(self):
os.listdir = self.real_listdir
os.path.isdir = self.real_isdir
utils.ismount = self.real_ismount
os.statvfs = self.real_statvfs
del self.mockos
self.app._from_recon_cache = self.real_from_cache
del self.fakecache
rmtree(self.tempdir)
def _get_app(self):
app = recon.ReconMiddleware(FakeApp(), {'swift_dir': self.tempdir})
return app
def _create_ring(self, ringpath, replica_map, devs, part_shift):
ring.RingData(replica_map, devs, part_shift).save(ringpath,
mtime=None)
def _create_rings(self):
# make the rings unique so they have different md5 sums
rings = {
'account.ring.gz': [
array.array('H', [3, 1, 3, 1]),
array.array('H', [0, 3, 1, 4]),
array.array('H', [1, 4, 0, 3])],
'container.ring.gz': [
array.array('H', [4, 3, 0, 1]),
array.array('H', [0, 1, 3, 4]),
array.array('H', [3, 4, 0, 1])],
'object.ring.gz': [
array.array('H', [0, 1, 0, 1]),
array.array('H', [0, 1, 0, 1]),
array.array('H', [3, 4, 3, 4])],
'object-1.ring.gz': [
array.array('H', [1, 0, 1, 0]),
array.array('H', [1, 0, 1, 0]),
array.array('H', [4, 3, 4, 3])],
'object-2.ring.gz': [
array.array('H', [1, 1, 1, 0]),
array.array('H', [1, 0, 1, 3]),
array.array('H', [4, 2, 4, 3])]
}
for ringfn, replica_map in rings.items():
ringpath = os.path.join(self.tempdir, ringfn)
self._create_ring(ringpath, replica_map, self.ring_devs,
self.ring_part_shift)
def _full_recon_path(self, server_type, recon_file=None):
if server_type:
recon_file = server_type_to_recon_file(server_type)
return os.path.join(DEFAULT_RECON_CACHE_PATH, recon_file)
@patch_policies([
StoragePolicy(0, 'stagecoach'),
StoragePolicy(1, 'pinto', is_deprecated=True),
StoragePolicy(2, 'toyota', is_default=True),
])
def test_get_ring_md5(self):
# We should only see configured and present rings, so to handle the
# "normal" case just patch the policies to match the existing rings.
expt_out = {'%s/account.ring.gz' % self.tempdir:
'hash-account.ring.gz',
'%s/container.ring.gz' % self.tempdir:
'hash-container.ring.gz',
'%s/object.ring.gz' % self.tempdir:
'hash-object.ring.gz',
'%s/object-1.ring.gz' % self.tempdir:
'hash-object-1.ring.gz',
'%s/object-2.ring.gz' % self.tempdir:
'hash-object-2.ring.gz'}
# We need to instantiate app after overriding the configured policies.
app = self._get_app()
# object-{1,2}.ring.gz should both appear as they are present on disk
# and were configured as policies.
self.assertEqual(sorted(app.get_ring_md5().items()),
sorted(expt_out.items()))
def test_get_ring_md5_ioerror_produces_none_hash(self):
# Ring files that are present but produce an IOError on read should
# still produce a ringmd5 entry with a None for the hash. Note that
# this is different than if an expected ring file simply doesn't exist,
# in which case it is excluded altogether from the ringmd5 response.
expt_out = {'%s/account.ring.gz' % self.tempdir: None,
'%s/container.ring.gz' % self.tempdir: None,
'%s/object.ring.gz' % self.tempdir: None}
with mock.patch('swift.common.middleware.recon.md5_hash_for_file',
side_effect=IOError):
ringmd5 = self.app.get_ring_md5()
self.assertEqual(sorted(ringmd5.items()),
sorted(expt_out.items()))
def test_get_ring_md5_failed_ring_hash_recovers_without_restart(self):
# Ring files that are present but produce an IOError on read will
# show a None hash, but if they can be read later their hash
# should become available in the ringmd5 response.
expt_out = {'%s/account.ring.gz' % self.tempdir: None,
'%s/container.ring.gz' % self.tempdir: None,
'%s/object.ring.gz' % self.tempdir: None}
with mock.patch('swift.common.middleware.recon.md5_hash_for_file',
side_effect=IOError):
ringmd5 = self.app.get_ring_md5()
self.assertEqual(sorted(ringmd5.items()),
sorted(expt_out.items()))
# If we fix a ring and it can be read again, its hash should then
# appear using the same app instance
def fake_hash_for_file(fn):
if 'object' not in fn:
raise IOError
return 'hash-' + os.path.basename(fn)
expt_out = {'%s/account.ring.gz' % self.tempdir: None,
'%s/container.ring.gz' % self.tempdir: None,
'%s/object.ring.gz' % self.tempdir:
'hash-object.ring.gz'}
with mock.patch('swift.common.middleware.recon.md5_hash_for_file',
fake_hash_for_file):
ringmd5 = self.app.get_ring_md5()
self.assertEqual(sorted(ringmd5.items()),
sorted(expt_out.items()))
@patch_policies([
StoragePolicy(0, 'stagecoach'),
StoragePolicy(2, 'bike', is_default=True),
StoragePolicy(3502, 'train')
])
def test_get_ring_md5_missing_ring_recovers_without_restart(self):
# If a configured ring is missing when the app is instantiated, but is
# later moved into place, we shouldn't need to restart object-server
# for it to appear in recon.
expt_out = {'%s/account.ring.gz' % self.tempdir:
'hash-account.ring.gz',
'%s/container.ring.gz' % self.tempdir:
'hash-container.ring.gz',
'%s/object.ring.gz' % self.tempdir:
'hash-object.ring.gz',
'%s/object-2.ring.gz' % self.tempdir:
'hash-object-2.ring.gz'}
# We need to instantiate app after overriding the configured policies.
app = self._get_app()
# object-1.ring.gz should not appear as it's present but unconfigured.
# object-3502.ring.gz should not appear as it's configured but not
# (yet) present.
self.assertEqual(sorted(app.get_ring_md5().items()),
sorted(expt_out.items()))
# Simulate the configured policy's missing ringfile being moved into
# place during runtime
ringfn = 'object-3502.ring.gz'
ringpath = os.path.join(self.tempdir, ringfn)
ringmap = [array.array('H', [1, 2, 1, 4]),
array.array('H', [4, 0, 1, 3]),
array.array('H', [1, 1, 0, 3])]
self._create_ring(os.path.join(self.tempdir, ringfn),
ringmap, self.ring_devs, self.ring_part_shift)
expt_out[ringpath] = 'hash-' + ringfn
# We should now see it in the ringmd5 response, without a restart
# (using the same app instance)
self.assertEqual(sorted(app.get_ring_md5().items()),
sorted(expt_out.items()))
@patch_policies([
StoragePolicy(0, 'stagecoach', is_default=True),
StoragePolicy(2, 'bike'),
StoragePolicy(2305, 'taxi')
])
def test_get_ring_md5_excludes_configured_missing_obj_rings(self):
# Object rings that are configured but missing aren't meant to appear
# in the ringmd5 response.
expt_out = {'%s/account.ring.gz' % self.tempdir:
'hash-account.ring.gz',
'%s/container.ring.gz' % self.tempdir:
'hash-container.ring.gz',
'%s/object.ring.gz' % self.tempdir:
'hash-object.ring.gz',
'%s/object-2.ring.gz' % self.tempdir:
'hash-object-2.ring.gz'}
# We need to instantiate app after overriding the configured policies.
app = self._get_app()
# object-1.ring.gz should not appear as it's present but unconfigured.
# object-2305.ring.gz should not appear as it's configured but not
# present.
self.assertEqual(sorted(app.get_ring_md5().items()),
sorted(expt_out.items()))
@patch_policies([
StoragePolicy(0, 'zero', is_default=True),
])
def test_get_ring_md5_excludes_unconfigured_present_obj_rings(self):
# Object rings that are present but not configured in swift.conf
# aren't meant to appear in the ringmd5 response.
expt_out = {'%s/account.ring.gz' % self.tempdir:
'hash-account.ring.gz',
'%s/container.ring.gz' % self.tempdir:
'hash-container.ring.gz',
'%s/object.ring.gz' % self.tempdir:
'hash-object.ring.gz'}
# We need to instantiate app after overriding the configured policies.
app = self._get_app()
# object-{1,2}.ring.gz should not appear as they are present on disk
# but were not configured as policies.
self.assertEqual(sorted(app.get_ring_md5().items()),
sorted(expt_out.items()))
def test_from_recon_cache(self):
oart = OpenAndReadTester(['{"notneeded": 5, "testkey1": "canhazio"}'])
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart.open)
self.assertEqual(oart.read_calls, [((), {})])
self.assertEqual(oart.open_calls, [(('test.cache', 'r'), {})])
self.assertEqual(rv, {'notpresentkey': None, 'testkey1': 'canhazio'})
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
def test_from_recon_cache_ioerror(self):
oart = self.frecon.raise_IOError()
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart)
self.assertEqual(rv, {'notpresentkey': None, 'testkey1': None})
self.assertIn('Error reading recon cache file: ',
self.app.logger.get_lines_for_level('error'))
# Now try with ignore_missing but not ENOENT
self.app.logger.clear()
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart,
ignore_missing=True)
self.assertEqual(rv, {'notpresentkey': None, 'testkey1': None})
self.assertIn('Error reading recon cache file: ',
self.app.logger.get_lines_for_level('error'))
# Now try again with ignore_missing with ENOENT
self.app.logger.clear()
oart = self.frecon.raise_IOError(errno.ENOENT)
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart,
ignore_missing=True)
self.assertEqual(rv, {'notpresentkey': None, 'testkey1': None})
self.assertEqual(self.app.logger.get_lines_for_level('error'), [])
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
def test_from_recon_cache_valueerror(self):
oart = self.frecon.raise_ValueError
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart)
self.assertEqual(rv, {'notpresentkey': None, 'testkey1': None})
self.assertIn('Error parsing recon cache file: ',
self.app.logger.get_lines_for_level('error'))
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
def test_from_recon_cache_exception(self):
oart = self.frecon.raise_Exception
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart)
self.assertEqual(rv, {'notpresentkey': None, 'testkey1': None})
self.assertIn('Error retrieving recon data: ',
self.app.logger.get_lines_for_level('error'))
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
def test_get_mounted(self):
mounts_content = [
'rootfs / rootfs rw 0 0',
'none /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0',
'none /proc proc rw,nosuid,nodev,noexec,relatime 0 0',
'none /dev devtmpfs rw,relatime,size=248404k,nr_inodes=62101,'
'mode=755 0 0',
'none /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,'
'ptmxmode=000 0 0',
'/dev/disk/by-uuid/e5b143bd-9f31-49a7-b018-5e037dc59252 / ext4'
' rw,relatime,errors=remount-ro,barrier=1,data=ordered 0 0',
'none /sys/fs/fuse/connections fusectl rw,relatime 0 0',
'none /sys/kernel/debug debugfs rw,relatime 0 0',
'none /sys/kernel/security securityfs rw,relatime 0 0',
'none /dev/shm tmpfs rw,nosuid,nodev,relatime 0 0',
'none /var/run tmpfs rw,nosuid,relatime,mode=755 0 0',
'none /var/lock tmpfs rw,nosuid,nodev,noexec,relatime 0 0',
'none /lib/init/rw tmpfs rw,nosuid,relatime,mode=755 0 0',
'/dev/loop0 /mnt/sdb1 xfs rw,noatime,nodiratime,attr2,'
'logbufs=8,noquota 0 0',
'rpc_pipefs /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0',
'nfsd /proc/fs/nfsd nfsd rw,relatime 0 0',
'none /proc/fs/vmblock/mountPoint vmblock rw,relatime 0 0',
'']
mounted_resp = [
{'device': 'rootfs', 'path': '/'},
{'device': 'none', 'path': '/sys'},
{'device': 'none', 'path': '/proc'},
{'device': 'none', 'path': '/dev'},
{'device': 'none', 'path': '/dev/pts'},
{'device': '/dev/disk/by-uuid/'
'e5b143bd-9f31-49a7-b018-5e037dc59252', 'path': '/'},
{'device': 'none', 'path': '/sys/fs/fuse/connections'},
{'device': 'none', 'path': '/sys/kernel/debug'},
{'device': 'none', 'path': '/sys/kernel/security'},
{'device': 'none', 'path': '/dev/shm'},
{'device': 'none', 'path': '/var/run'},
{'device': 'none', 'path': '/var/lock'},
{'device': 'none', 'path': '/lib/init/rw'},
{'device': '/dev/loop0', 'path': '/mnt/sdb1'},
{'device': 'rpc_pipefs', 'path': '/var/lib/nfs/rpc_pipefs'},
{'device': 'nfsd', 'path': '/proc/fs/nfsd'},
{'device': 'none', 'path': '/proc/fs/vmblock/mountPoint'}]
oart = OpenAndReadTester(mounts_content)
rv = self.app.get_mounted(openr=oart.open)
self.assertEqual(oart.open_calls, [(('/proc/mounts', 'r'), {})])
self.assertEqual(rv, mounted_resp)
def test_get_load(self):
oart = OpenAndReadTester(['0.03 0.03 0.00 1/220 16306'])
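        # /proc/loadavg layout, for reference: 1-, 5- and 15-minute load
        # averages, runnable/total tasks, and the most recently allocated
        # PID; that last field is surfaced as 'processes' in the dict
        # asserted below.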
rv = self.app.get_load(openr=oart.open)
self.assertEqual(oart.read_calls, [((), {})])
self.assertEqual(oart.open_calls, [(('/proc/loadavg', 'r'), {})])
self.assertEqual(rv, {'5m': 0.029999999999999999, '15m': 0.0,
'processes': 16306, 'tasks': '1/220',
'1m': 0.029999999999999999})
def test_get_mem(self):
meminfo_content = ['MemTotal: 505840 kB',
'MemFree: 26588 kB',
'Buffers: 44948 kB',
'Cached: 146376 kB',
'SwapCached: 14736 kB',
'Active: 194900 kB',
'Inactive: 193412 kB',
'Active(anon): 94208 kB',
'Inactive(anon): 102848 kB',
'Active(file): 100692 kB',
'Inactive(file): 90564 kB',
'Unevictable: 0 kB',
'Mlocked: 0 kB',
'SwapTotal: 407544 kB',
'SwapFree: 313436 kB',
'Dirty: 104 kB',
'Writeback: 0 kB',
'AnonPages: 185268 kB',
'Mapped: 9592 kB',
'Shmem: 68 kB',
'Slab: 61716 kB',
'SReclaimable: 46620 kB',
'SUnreclaim: 15096 kB',
'KernelStack: 1760 kB',
'PageTables: 8832 kB',
'NFS_Unstable: 0 kB',
'Bounce: 0 kB',
'WritebackTmp: 0 kB',
'CommitLimit: 660464 kB',
'Committed_AS: 565608 kB',
'VmallocTotal: 34359738367 kB',
'VmallocUsed: 266724 kB',
'VmallocChunk: 34359467156 kB',
'HardwareCorrupted: 0 kB',
'HugePages_Total: 0',
'HugePages_Free: 0',
'HugePages_Rsvd: 0',
'HugePages_Surp: 0',
'Hugepagesize: 2048 kB',
'DirectMap4k: 10240 kB',
'DirectMap2M: 514048 kB',
'']
meminfo_resp = {'WritebackTmp': '0 kB',
'SwapTotal': '407544 kB',
'Active(anon)': '94208 kB',
'SwapFree': '313436 kB',
'DirectMap4k': '10240 kB',
'KernelStack': '1760 kB',
'MemFree': '26588 kB',
'HugePages_Rsvd': '0',
'Committed_AS': '565608 kB',
'Active(file)': '100692 kB',
'NFS_Unstable': '0 kB',
'VmallocChunk': '34359467156 kB',
'Writeback': '0 kB',
'Inactive(file)': '90564 kB',
'MemTotal': '505840 kB',
'VmallocUsed': '266724 kB',
'HugePages_Free': '0',
'AnonPages': '185268 kB',
'Active': '194900 kB',
'Inactive(anon)': '102848 kB',
'CommitLimit': '660464 kB',
'Hugepagesize': '2048 kB',
'Cached': '146376 kB',
'SwapCached': '14736 kB',
'VmallocTotal': '34359738367 kB',
'Shmem': '68 kB',
'Mapped': '9592 kB',
'SUnreclaim': '15096 kB',
'Unevictable': '0 kB',
'SReclaimable': '46620 kB',
'Mlocked': '0 kB',
'DirectMap2M': '514048 kB',
'HugePages_Surp': '0',
'Bounce': '0 kB',
'Inactive': '193412 kB',
'PageTables': '8832 kB',
'HardwareCorrupted': '0 kB',
'HugePages_Total': '0',
'Slab': '61716 kB',
'Buffers': '44948 kB',
'Dirty': '104 kB'}
oart = OpenAndReadTester(meminfo_content)
rv = self.app.get_mem(openr=oart.open)
self.assertEqual(oart.open_calls, [(('/proc/meminfo', 'r'), {})])
self.assertEqual(rv, meminfo_resp)
def test_get_async_info(self):
now = time.time()
from_cache_response = {'async_pending': 5, 'async_pending_last': now}
self.fakecache.fakeout = from_cache_response
rv = self.app.get_async_info()
self.assertEqual(self.fakecache.fakeout_calls,
[((['async_pending', 'async_pending_last'],
self._full_recon_path('object')), {})])
self.assertEqual(rv, {'async_pending': 5, 'async_pending_last': now})
def test_get_replication_info_account(self):
from_cache_response = {
"replication_stats": {
"attempted": 1, "diff": 0,
"diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0,
"failure_nodes": {
"192.168.0.1": 0,
"192.168.0.2": 0},
"no_change": 2, "remote_merge": 0,
"remove": 0, "rsync": 0,
"start": 1333044050.855202,
"success": 2, "ts_repl": 0},
"replication_time": 0.2615511417388916,
"replication_last": 1357969645.25}
self.fakecache.fakeout = from_cache_response
rv = self.app.get_replication_info('account')
self.assertEqual(self.fakecache.fakeout_calls,
[((['replication_time', 'replication_stats',
'replication_last'],
self._full_recon_path('account')), {})])
self.assertEqual(rv, {
"replication_stats": {
"attempted": 1, "diff": 0,
"diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0,
"failure_nodes": {
"192.168.0.1": 0,
"192.168.0.2": 0},
"no_change": 2, "remote_merge": 0,
"remove": 0, "rsync": 0,
"start": 1333044050.855202,
"success": 2, "ts_repl": 0},
"replication_time": 0.2615511417388916,
"replication_last": 1357969645.25})
def test_get_replication_info_container(self):
from_cache_response = {
"replication_time": 200.0,
"replication_stats": {
"attempted": 179, "diff": 0,
"diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0,
"failure_nodes": {
"192.168.0.1": 0,
"192.168.0.2": 0},
"no_change": 358, "remote_merge": 0,
"remove": 0, "rsync": 0,
"start": 5.5, "success": 358,
"ts_repl": 0},
"replication_last": 1357969645.25}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_replication_info('container')
self.assertEqual(self.fakecache.fakeout_calls,
[((['replication_time', 'replication_stats',
'replication_last'],
self._full_recon_path('container')),
{})])
self.assertEqual(rv, {
"replication_time": 200.0,
"replication_stats": {
"attempted": 179, "diff": 0,
"diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0,
"failure_nodes": {
"192.168.0.1": 0,
"192.168.0.2": 0},
"no_change": 358, "remote_merge": 0,
"remove": 0, "rsync": 0,
"start": 5.5, "success": 358,
"ts_repl": 0},
"replication_last": 1357969645.25})
def test_get_replication_object(self):
from_cache_response = {
"replication_time": 0.2615511417388916,
"replication_stats": {
"attempted": 179,
"failure": 0, "hashmatch": 0,
"failure_nodes": {
"192.168.0.1": 0,
"192.168.0.2": 0},
"remove": 0, "rsync": 0,
"start": 1333044050.855202, "success": 358},
"replication_last": 1357969645.25,
"object_replication_time": 0.2615511417388916,
"object_replication_last": 1357969645.25}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_replication_info('object')
self.assertEqual(self.fakecache.fakeout_calls,
[((['replication_time', 'replication_stats',
'replication_last', 'object_replication_time',
'object_replication_last'],
self._full_recon_path('object')), {})])
self.assertEqual(rv, {
"replication_time": 0.2615511417388916,
"replication_stats": {
"attempted": 179,
"failure": 0, "hashmatch": 0,
"failure_nodes": {
"192.168.0.1": 0,
"192.168.0.2": 0},
"remove": 0, "rsync": 0,
"start": 1333044050.855202, "success": 358},
"replication_last": 1357969645.25,
"object_replication_time": 0.2615511417388916,
"object_replication_last": 1357969645.25})
def test_get_replication_info_unrecognized(self):
rv = self.app.get_replication_info('unrecognized_recon_type')
self.assertIsNone(rv)
def test_get_reconstruction(self):
from_cache_response = {
"object_reconstruction_time": 0.2615511417388916,
"object_reconstruction_last": 1357969645.25}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_reconstruction_info()
self.assertEqual(self.fakecache.fakeout_calls,
[((['object_reconstruction_last',
'object_reconstruction_time'],
'/var/cache/swift/object.recon'), {})])
self.assertEqual(rv, {
"object_reconstruction_time": 0.2615511417388916,
"object_reconstruction_last": 1357969645.25})
def test_get_updater_info_container(self):
from_cache_response = {"container_updater_sweep": 18.476239919662476}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_updater_info('container')
self.assertEqual(self.fakecache.fakeout_calls,
[((['container_updater_sweep'],
self._full_recon_path('container')), {})])
self.assertEqual(rv, {"container_updater_sweep": 18.476239919662476})
def test_get_updater_info_object(self):
from_cache_response = {"object_updater_sweep": 0.79848217964172363}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_updater_info('object')
self.assertEqual(self.fakecache.fakeout_calls,
[((['object_updater_sweep'],
self._full_recon_path('object')), {})])
self.assertEqual(rv, {"object_updater_sweep": 0.79848217964172363})
def test_get_updater_info_unrecognized(self):
rv = self.app.get_updater_info('unrecognized_recon_type')
self.assertIsNone(rv)
def test_get_expirer_info_object(self):
from_cache_response = {'object_expiration_pass': 0.79848217964172363,
'expired_last_pass': 99}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_expirer_info('object')
self.assertEqual(self.fakecache.fakeout_calls,
[((['object_expiration_pass', 'expired_last_pass'],
self._full_recon_path('object')), {})])
self.assertEqual(rv, from_cache_response)
def test_get_auditor_info_account(self):
from_cache_response = {"account_auditor_pass_completed": 0.24,
"account_audits_failed": 0,
"account_audits_passed": 6,
"account_audits_since": "1333145374.1373529"}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_auditor_info('account')
self.assertEqual(self.fakecache.fakeout_calls,
[((['account_audits_passed',
'account_auditor_pass_completed',
'account_audits_since',
'account_audits_failed'],
self._full_recon_path('account')), {})])
self.assertEqual(rv, {"account_auditor_pass_completed": 0.24,
"account_audits_failed": 0,
"account_audits_passed": 6,
"account_audits_since": "1333145374.1373529"})
def test_get_auditor_info_container(self):
from_cache_response = {"container_auditor_pass_completed": 0.24,
"container_audits_failed": 0,
"container_audits_passed": 6,
"container_audits_since": "1333145374.1373529"}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_auditor_info('container')
self.assertEqual(self.fakecache.fakeout_calls,
[((['container_audits_passed',
'container_auditor_pass_completed',
'container_audits_since',
'container_audits_failed'],
self._full_recon_path('container')), {})])
self.assertEqual(rv, {"container_auditor_pass_completed": 0.24,
"container_audits_failed": 0,
"container_audits_passed": 6,
"container_audits_since": "1333145374.1373529"})
def test_get_auditor_info_object(self):
from_cache_response = {
"object_auditor_stats_ALL": {
"audit_time": 115.14418768882751,
"bytes_processed": 234660,
"completed": 115.4512460231781,
"errors": 0,
"files_processed": 2310,
"quarantined": 0},
"object_auditor_stats_ZBF": {
"audit_time": 45.877294063568115,
"bytes_processed": 0,
"completed": 46.181446075439453,
"errors": 0,
"files_processed": 2310,
"quarantined": 0}}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_auditor_info('object')
self.assertEqual(self.fakecache.fakeout_calls,
[((['object_auditor_stats_ALL',
'object_auditor_stats_ZBF'],
self._full_recon_path('object')), {})])
self.assertEqual(rv, {
"object_auditor_stats_ALL": {
"audit_time": 115.14418768882751,
"bytes_processed": 234660,
"completed": 115.4512460231781,
"errors": 0,
"files_processed": 2310,
"quarantined": 0},
"object_auditor_stats_ZBF": {
"audit_time": 45.877294063568115,
"bytes_processed": 0,
"completed": 46.181446075439453,
"errors": 0,
"files_processed": 2310,
"quarantined": 0}})
def test_get_auditor_info_object_parallel_once(self):
from_cache_response = {
"object_auditor_stats_ALL": {
'disk1': {
"audit_time": 115.14418768882751,
"bytes_processed": 234660,
"completed": 115.4512460231781,
"errors": 0,
"files_processed": 2310,
"quarantined": 0},
'disk2': {
"audit_time": 115,
"bytes_processed": 234660,
"completed": 115,
"errors": 0,
"files_processed": 2310,
"quarantined": 0}},
"object_auditor_stats_ZBF": {'disk1disk2': {
"audit_time": 45.877294063568115,
"bytes_processed": 0,
"completed": 46.181446075439453,
"errors": 0,
"files_processed": 2310,
"quarantined": 0}}}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_auditor_info('object')
self.assertEqual(self.fakecache.fakeout_calls,
[((['object_auditor_stats_ALL',
'object_auditor_stats_ZBF'],
self._full_recon_path('object')), {})])
self.assertEqual(rv, {
"object_auditor_stats_ALL": {
'disk1': {
"audit_time": 115.14418768882751,
"bytes_processed": 234660,
"completed": 115.4512460231781,
"errors": 0,
"files_processed": 2310,
"quarantined": 0},
'disk2': {
"audit_time": 115,
"bytes_processed": 234660,
"completed": 115,
"errors": 0,
"files_processed": 2310,
"quarantined": 0}},
"object_auditor_stats_ZBF": {'disk1disk2': {
"audit_time": 45.877294063568115,
"bytes_processed": 0,
"completed": 46.181446075439453,
"errors": 0,
"files_processed": 2310,
"quarantined": 0}}})
def test_get_auditor_info_unrecognized(self):
rv = self.app.get_auditor_info('unrecognized_recon_type')
self.assertIsNone(rv)
def test_get_unmounted(self):
unmounted_resp = [{'device': 'fakeone', 'mounted': False},
{'device': 'faketwo', 'mounted': False}]
self.mockos.ls_output = ['fakeone', 'faketwo']
self.mockos.isdir_output = True
self.mockos.ismount_output = False
rv = self.app.get_unmounted()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/fakeone',), {}),
(('/srv/node/faketwo',), {})])
self.assertEqual(rv, unmounted_resp)
def test_get_unmounted_excludes_files(self):
unmounted_resp = []
self.mockos.ls_output = ['somerando.log']
self.mockos.isdir_output = False
self.mockos.ismount_output = False
rv = self.app.get_unmounted()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/somerando.log',), {})])
self.assertEqual(rv, unmounted_resp)
def test_get_unmounted_all_mounted(self):
unmounted_resp = []
self.mockos.ls_output = ['fakeone', 'faketwo']
self.mockos.isdir_output = True
self.mockos.ismount_output = True
rv = self.app.get_unmounted()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/fakeone',), {}),
(('/srv/node/faketwo',), {})])
self.assertEqual(rv, unmounted_resp)
def test_get_unmounted_checkmount_fail(self):
unmounted_resp = [{'device': 'fakeone', 'mounted': 'brokendrive'}]
self.mockos.ls_output = ['fakeone']
self.mockos.isdir_output = True
self.mockos.ismount_output = OSError('brokendrive')
rv = self.app.get_unmounted()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/fakeone',), {})])
self.assertEqual(self.mockos.ismount_calls,
[(('/srv/node/fakeone',), {})])
self.assertEqual(rv, unmounted_resp)
def test_get_unmounted_no_mounts(self):
def fake_checkmount_true(*args):
return True
unmounted_resp = []
self.mockos.ls_output = []
self.mockos.isdir_output = False
self.mockos.ismount_output = False
rv = self.app.get_unmounted()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls, [])
self.assertEqual(rv, unmounted_resp)
def test_get_diskusage(self):
# posix.statvfs_result(f_bsize=4096, f_frsize=4096, f_blocks=1963185,
# f_bfree=1113075, f_bavail=1013351,
# f_files=498736,
# f_ffree=397839, f_favail=397839, f_flag=0,
# f_namemax=255)
statvfs_content = (4096, 4096, 1963185, 1113075, 1013351, 498736,
397839, 397839, 0, 255)
du_resp = [{'device': 'canhazdrive1', 'avail': 4150685696,
'mounted': True, 'used': 3890520064, 'size': 8041205760}]
self.mockos.ls_output = ['canhazdrive1']
self.mockos.isdir_output = True
self.mockos.statvfs_output = statvfs_content
self.mockos.ismount_output = True
rv = self.app.get_diskusage()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEqual(self.mockos.statvfs_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEqual(rv, du_resp)
def test_get_diskusage_excludes_files(self):
du_resp = []
self.mockos.ls_output = ['somerando.log']
self.mockos.isdir_output = False
rv = self.app.get_diskusage()
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/somerando.log',), {})])
self.assertEqual(self.mockos.statvfs_calls, [])
self.assertEqual(rv, du_resp)
def test_get_diskusage_checkmount_fail(self):
du_resp = [{'device': 'canhazdrive1', 'avail': '',
'mounted': 'brokendrive', 'used': '', 'size': ''}]
self.mockos.ls_output = ['canhazdrive1']
self.mockos.isdir_output = True
self.mockos.ismount_output = OSError('brokendrive')
rv = self.app.get_diskusage()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEqual(self.mockos.ismount_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEqual(rv, du_resp)
@mock.patch("swift.common.middleware.recon.check_mount", fake_check_mount)
def test_get_diskusage_oserror(self):
du_resp = [{'device': 'canhazdrive1', 'avail': '',
'mounted': 'Input/Output Error', 'used': '', 'size': ''}]
self.mockos.ls_output = ['canhazdrive1']
self.mockos.isdir_output = True
rv = self.app.get_diskusage()
self.assertEqual(rv, du_resp)
def test_get_quarantine_count(self):
dirs = [['sda'], ['accounts', 'containers', 'objects', 'objects-1']]
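        # fake_listdir below pops these in order: first the device listing,
        # then the per-device quarantine type directories.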
self.mockos.ismount_output = True
def fake_lstat(*args, **kwargs):
# posix.lstat_result(st_mode=1, st_ino=2, st_dev=3, st_nlink=4,
# st_uid=5, st_gid=6, st_size=7, st_atime=8,
# st_mtime=9, st_ctime=10)
return stat_result((1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
def fake_exists(*args, **kwargs):
return True
def fake_listdir(*args, **kwargs):
return dirs.pop(0)
with mock.patch("os.lstat", fake_lstat):
with mock.patch("os.path.exists", fake_exists):
with mock.patch("os.listdir", fake_listdir):
rv = self.app.get_quarantine_count()
self.assertEqual(rv, {'objects': 4, 'accounts': 2, 'policies':
{'1': {'objects': 2}, '0': {'objects': 2}},
'containers': 2})
def test_get_socket_info(self):
sockstat_content = ['sockets: used 271',
'TCP: inuse 30 orphan 0 tw 0 alloc 31 mem 0',
'UDP: inuse 16 mem 4', 'UDPLITE: inuse 0',
'RAW: inuse 0', 'FRAG: inuse 0 memory 0',
'']
oart = OpenAndReadTester(sockstat_content)
self.app.get_socket_info(openr=oart.open)
self.assertEqual(oart.open_calls, [
(('/proc/net/sockstat', 'r'), {}),
(('/proc/net/sockstat6', 'r'), {})])
def test_get_driveaudit_info(self):
from_cache_response = {'drive_audit_errors': 7}
self.fakecache.fakeout = from_cache_response
rv = self.app.get_driveaudit_error()
self.assertEqual(self.fakecache.fakeout_calls,
[((['drive_audit_errors'],
self._full_recon_path(
None, recon_file=RECON_DRIVE_FILE)), {})])
self.assertEqual(rv, {'drive_audit_errors': 7})
def test_get_time(self):
def fake_time():
return 1430000000.0
with mock.patch("time.time", fake_time):
now = fake_time()
rv = self.app.get_time()
self.assertEqual(rv, now)
def test_get_sharding_info(self):
from_cache_response = {
"sharding_stats": {
"attempted": 0,
"deferred": 0,
"diff": 0,
"diff_capped": 0,
"empty": 0,
"failure": 0,
"hashmatch": 0,
"no_change": 0,
"remote_merge": 0,
"remove": 0,
"rsync": 0,
"start": 1614136398.5729735,
"success": 0,
"ts_repl": 0,
"sharding": {
"audit_root": {
"attempted": 0,
"failure": 0,
"success": 0,
},
"audit_shard": {
"attempted": 0,
"failure": 0,
"success": 0,
},
"cleaved": {
"attempted": 0,
"failure": 0,
"max_time": 0,
"min_time": 0,
"success": 0,
},
"created": {
"attempted": 0,
"failure": 0,
"success": 0,
},
"misplaced": {
"attempted": 0,
"failure": 0,
"found": 0,
"placed": 0,
"success": 0,
"unplaced": 0,
},
"scanned": {
"attempted": 0,
"failure": 0,
"found": 0,
"max_time": 0,
"min_time": 0,
"success": 0,
},
"sharding_candidates": {
"found": 0,
"top": [],
},
"visited": {
"attempted": 0,
"completed": 0,
"failure": 0,
"skipped": 6,
"success": 0,
}
},
},
"sharding_time": 600,
"sharding_last": 1614136398.6680582}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_sharding_info()
self.assertEqual(self.fakecache.fakeout_calls, [
((['sharding_stats', 'sharding_time', 'sharding_last'],
self._full_recon_path('container')), {})])
self.assertEqual(rv, from_cache_response)
def test_get_relinker_info(self):
from_cache_response = {
"devices": {
"sdb3": {
"parts_done": 523,
"policies": {
"1": {
"next_part_power": 11,
"start_time": 1618998724.845616,
"stats": {
"errors": 0,
"files": 1630,
"hash_dirs": 1630,
"linked": 1630,
"policies": 1,
"removed": 0
},
"timestamp": 1618998730.24672,
"total_parts": 1029,
"total_time": 5.400741815567017
}},
"start_time": 1618998724.845946,
"stats": {
"errors": 0,
"files": 836,
"hash_dirs": 836,
"linked": 836,
"removed": 0
},
"timestamp": 1618998730.24672,
"total_parts": 523,
"total_time": 5.400741815567017
},
"sdb7": {
"parts_done": 506,
"policies": {
"1": {
"next_part_power": 11,
"part_power": 10,
"parts_done": 506,
"start_time": 1618998724.845616,
"stats": {
"errors": 0,
"files": 794,
"hash_dirs": 794,
"linked": 794,
"removed": 0
},
"step": "relink",
"timestamp": 1618998730.166175,
"total_parts": 506,
"total_time": 5.320528984069824
}
},
"start_time": 1618998724.845616,
"stats": {
"errors": 0,
"files": 794,
"hash_dirs": 794,
"linked": 794,
"removed": 0
},
"timestamp": 1618998730.166175,
"total_parts": 506,
"total_time": 5.320528984069824
}
},
"workers": {
"100": {
"drives": ["sda1"],
"return_code": 0,
"timestamp": 1618998730.166175}
}}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_relinker_info()
self.assertEqual(self.fakecache.fakeout_calls,
[((['devices', 'workers'],
self._full_recon_path(
None, recon_file=RECON_RELINKER_FILE)),
{'ignore_missing': True})])
self.assertEqual(rv, from_cache_response)
class TestReconMiddleware(unittest.TestCase):
def fake_list(self, path):
return ['a', 'b']
def setUp(self):
self.frecon = FakeRecon()
self.real_listdir = os.listdir
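        # Patch os.listdir while the middleware is constructed so its device
        # discovery sees a fixed listing, then restore the real function.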
os.listdir = self.fake_list
self.app = recon.ReconMiddleware(FakeApp(), {'object_recon': "true"})
self.real_app_get_device_info = self.app.get_device_info
self.real_app_get_swift_conf_md5 = self.app.get_swift_conf_md5
os.listdir = self.real_listdir
# self.app.object_recon = True
self.app.get_mem = self.frecon.fake_mem
self.app.get_load = self.frecon.fake_load
self.app.get_async_info = self.frecon.fake_async
self.app.get_device_info = self.frecon.fake_get_device_info
self.app.get_replication_info = self.frecon.fake_replication
self.app.get_reconstruction_info = self.frecon.fake_reconstruction
self.app.get_auditor_info = self.frecon.fake_auditor
self.app.get_updater_info = self.frecon.fake_updater
self.app.get_expirer_info = self.frecon.fake_expirer
self.app.get_mounted = self.frecon.fake_mounted
self.app.get_unmounted = self.frecon.fake_unmounted
self.app.get_diskusage = self.frecon.fake_diskusage
self.app.get_ring_md5 = self.frecon.fake_ringmd5
self.app.get_swift_conf_md5 = self.frecon.fake_swiftconfmd5
self.app.get_quarantine_count = self.frecon.fake_quarantined
self.app.get_socket_info = self.frecon.fake_sockstat
self.app.get_driveaudit_error = self.frecon.fake_driveaudit
self.app.get_time = self.frecon.fake_time
self.app.get_sharding_info = self.frecon.fake_sharding
self.app.get_relinker_info = self.frecon.fake_relinker
def test_recon_get_mem(self):
get_mem_resp = [b'{"memtest": "1"}']
req = Request.blank('/recon/mem', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_mem_resp)
def test_recon_get_version(self):
req = Request.blank('/recon/version',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [json.dumps({
'version': swiftver}).encode('ascii')])
def test_recon_get_load(self):
get_load_resp = [b'{"loadtest": "1"}']
req = Request.blank('/recon/load', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_load_resp)
def test_recon_get_async(self):
get_async_resp = [b'{"asynctest": "1"}']
req = Request.blank('/recon/async', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_async_resp)
def test_get_device_info(self):
get_device_resp = [b'{"/srv/1/node": ["sdb1"]}']
req = Request.blank('/recon/devices',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_device_resp)
def test_reconstruction_info(self):
get_reconstruction_resp = [b'{"reconstructiontest": "1"}']
req = Request.blank('/recon/reconstruction/object',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_reconstruction_resp)
def test_recon_get_replication_notype(self):
get_replication_resp = [b'{"replicationtest": "1"}']
req = Request.blank('/recon/replication',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_replication_resp)
self.assertEqual(self.frecon.fake_replication_rtype, 'object')
self.frecon.fake_replication_rtype = None
def test_recon_get_replication_all(self):
get_replication_resp = [b'{"replicationtest": "1"}']
# test account
req = Request.blank('/recon/replication/account',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_replication_resp)
self.assertEqual(self.frecon.fake_replication_rtype, 'account')
self.frecon.fake_replication_rtype = None
# test container
req = Request.blank('/recon/replication/container',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_replication_resp)
self.assertEqual(self.frecon.fake_replication_rtype, 'container')
self.frecon.fake_replication_rtype = None
# test object
req = Request.blank('/recon/replication/object',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_replication_resp)
self.assertEqual(self.frecon.fake_replication_rtype, 'object')
self.frecon.fake_replication_rtype = None
def test_recon_get_auditor_invalid(self):
get_auditor_resp = [b'Invalid path: /recon/auditor/invalid']
req = Request.blank('/recon/auditor/invalid',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_auditor_resp)
def test_recon_get_auditor_notype(self):
get_auditor_resp = [b'Invalid path: /recon/auditor']
req = Request.blank('/recon/auditor',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_auditor_resp)
def test_recon_get_auditor_all(self):
get_auditor_resp = [b'{"auditortest": "1"}']
req = Request.blank('/recon/auditor/account',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_auditor_resp)
self.assertEqual(self.frecon.fake_auditor_rtype, 'account')
self.frecon.fake_auditor_rtype = None
req = Request.blank('/recon/auditor/container',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_auditor_resp)
self.assertEqual(self.frecon.fake_auditor_rtype, 'container')
self.frecon.fake_auditor_rtype = None
req = Request.blank('/recon/auditor/object',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_auditor_resp)
self.assertEqual(self.frecon.fake_auditor_rtype, 'object')
self.frecon.fake_auditor_rtype = None
def test_recon_get_updater_invalid(self):
get_updater_resp = [b'Invalid path: /recon/updater/invalid']
req = Request.blank('/recon/updater/invalid',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_updater_resp)
def test_recon_get_updater_notype(self):
get_updater_resp = [b'Invalid path: /recon/updater']
req = Request.blank('/recon/updater',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_updater_resp)
def test_recon_get_updater(self):
get_updater_resp = [b'{"updatertest": "1"}']
req = Request.blank('/recon/updater/container',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(self.frecon.fake_updater_rtype, 'container')
self.frecon.fake_updater_rtype = None
self.assertEqual(resp, get_updater_resp)
req = Request.blank('/recon/updater/object',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_updater_resp)
self.assertEqual(self.frecon.fake_updater_rtype, 'object')
self.frecon.fake_updater_rtype = None
def test_recon_get_expirer_invalid(self):
get_updater_resp = [b'Invalid path: /recon/expirer/invalid']
req = Request.blank('/recon/expirer/invalid',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_updater_resp)
def test_recon_get_expirer_notype(self):
get_updater_resp = [b'Invalid path: /recon/expirer']
req = Request.blank('/recon/expirer',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_updater_resp)
def test_recon_get_expirer_object(self):
get_expirer_resp = [b'{"expirertest": "1"}']
req = Request.blank('/recon/expirer/object',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_expirer_resp)
self.assertEqual(self.frecon.fake_expirer_rtype, 'object')
self.frecon.fake_updater_rtype = None
def test_recon_get_mounted(self):
get_mounted_resp = [b'{"mountedtest": "1"}']
req = Request.blank('/recon/mounted',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_mounted_resp)
def test_recon_get_unmounted(self):
get_unmounted_resp = [b'{"unmountedtest": "1"}']
self.app.get_unmounted = self.frecon.fake_unmounted
req = Request.blank('/recon/unmounted',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_unmounted_resp)
def test_recon_get_unmounted_empty(self):
get_unmounted_resp = b'[]'
self.app.get_unmounted = self.frecon.fake_unmounted_empty
req = Request.blank('/recon/unmounted',
environ={'REQUEST_METHOD': 'GET'})
resp = b''.join(self.app(req.environ, start_response))
self.assertEqual(resp, get_unmounted_resp)
def test_recon_get_diskusage(self):
get_diskusage_resp = [b'{"diskusagetest": "1"}']
req = Request.blank('/recon/diskusage',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_diskusage_resp)
def test_recon_get_ringmd5(self):
get_ringmd5_resp = [b'{"ringmd5test": "1"}']
req = Request.blank('/recon/ringmd5',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_ringmd5_resp)
def test_recon_get_swiftconfmd5(self):
get_swiftconfmd5_resp = [b'{"/etc/swift/swift.conf": "abcdef"}']
req = Request.blank('/recon/swiftconfmd5',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_swiftconfmd5_resp)
def test_recon_get_quarantined(self):
get_quarantined_resp = [b'{"quarantinedtest": "1"}']
req = Request.blank('/recon/quarantined',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_quarantined_resp)
def test_recon_get_sockstat(self):
get_sockstat_resp = [b'{"sockstattest": "1"}']
req = Request.blank('/recon/sockstat',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_sockstat_resp)
def test_recon_invalid_path(self):
req = Request.blank('/recon/invalid',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'Invalid path: /recon/invalid'])
def test_no_content(self):
self.app.get_load = self.frecon.nocontent
req = Request.blank('/recon/load', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [b'Internal server error.'])
def test_recon_pass(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, b'FAKE APP')
def test_recon_get_driveaudit(self):
get_driveaudit_resp = [b'{"driveaudittest": "1"}']
req = Request.blank('/recon/driveaudit',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_driveaudit_resp)
def test_recon_get_time(self):
get_time_resp = [b'{"timetest": "1"}']
req = Request.blank('/recon/time',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_time_resp)
def test_get_device_info_function(self):
"""Test get_device_info function call success"""
resp = self.app.get_device_info()
self.assertEqual(['sdb1'], resp['/srv/1/node'])
def test_get_device_info_fail(self):
"""Test get_device_info failure by failing os.listdir"""
os.listdir = fail_os_listdir
resp = self.real_app_get_device_info()
os.listdir = self.real_listdir
device_path = list(resp)[0]
self.assertIsNone(resp[device_path])
def test_get_swift_conf_md5(self):
"""Test get_swift_conf_md5 success"""
resp = self.app.get_swift_conf_md5()
self.assertEqual('abcdef', resp['/etc/swift/swift.conf'])
def test_get_swift_conf_md5_fail(self):
"""Test get_swift_conf_md5 failure by failing file open"""
with mock.patch('swift.common.middleware.recon.md5_hash_for_file',
side_effect=IOError):
resp = self.real_app_get_swift_conf_md5()
self.assertIsNone(resp['/etc/swift/swift.conf'])
def test_recon_get_sharding(self):
get_sharding_resp = [
b'{"sharding_stats": "1"}']
req = Request.blank('/recon/sharding',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_sharding_resp)
def test_recon_get_relink(self):
get_recon_resp = [
b'{"relinktest": "1"}']
req = Request.blank('/recon/relinker',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_recon_resp)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/middleware/test_recon.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
import numbers
import unittest
import os
import tarfile
import zlib
import mock
import six
from io import BytesIO
from shutil import rmtree
from tempfile import mkdtemp
from eventlet import sleep
from mock import patch, call
from test.debug_logger import debug_logger
from test.unit.common.middleware.helpers import FakeSwift
from swift.common import utils, constraints, registry
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.middleware import bulk
from swift.common.swob import Request, Response, HTTPException, \
HTTPNoContent, HTTPCreated
from swift.common.http import HTTP_NOT_FOUND, HTTP_UNAUTHORIZED
class FakeApp(object):
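    # Stub backend that answers each request based on its path prefix
    # (e.g. /unauth/ -> 401, /create_cont/ -> 201, /broke/ -> 500), letting
    # the bulk middleware be exercised without a real proxy.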
def __init__(self):
self.calls = 0
self.delete_paths = []
self.put_paths = []
self.max_pathlen = 100
self.del_cont_total_calls = 2
self.del_cont_cur_call = 0
def __call__(self, env, start_response):
self.calls += 1
if env.get('swift.source') in ('EA', 'BD'):
assert not env.get('swift.proxy_access_log_made')
if not six.PY2:
# Check that it's valid WSGI
assert all(0 <= ord(c) <= 255 for c in env['PATH_INFO'])
if env['REQUEST_METHOD'] == 'PUT':
self.put_paths.append(env['PATH_INFO'])
if env['PATH_INFO'].startswith('/unauth/'):
if env['PATH_INFO'].endswith('/c/f_ok'):
return Response(status='204 No Content')(env, start_response)
return Response(status=401)(env, start_response)
if env['PATH_INFO'].startswith('/create_cont/'):
if env['REQUEST_METHOD'] == 'HEAD':
return Response(status='404 Not Found')(env, start_response)
return Response(status='201 Created')(env, start_response)
if env['PATH_INFO'].startswith('/create_cont_fail/'):
if env['REQUEST_METHOD'] == 'HEAD':
return Response(status='403 Forbidden')(env, start_response)
return Response(status='404 Not Found')(env, start_response)
if env['PATH_INFO'].startswith('/create_obj_unauth/'):
if env['PATH_INFO'].endswith('/cont'):
return Response(status='201 Created')(env, start_response)
return Response(status=401)(env, start_response)
if env['PATH_INFO'].startswith('/tar_works/'):
if len(env['PATH_INFO']) > self.max_pathlen:
return Response(status='400 Bad Request')(env, start_response)
return Response(status='201 Created')(env, start_response)
if env['PATH_INFO'].startswith('/tar_works_cont_head_fail/'):
if env['REQUEST_METHOD'] == 'HEAD':
return Response(status='404 Not Found')(env, start_response)
if len(env['PATH_INFO']) > 100:
return Response(status='400 Bad Request')(env, start_response)
return Response(status='201 Created')(env, start_response)
if (env['PATH_INFO'].startswith('/delete_works/')
and env['REQUEST_METHOD'] == 'DELETE'):
self.delete_paths.append(env['PATH_INFO'])
if len(env['PATH_INFO']) > self.max_pathlen:
return Response(status='400 Bad Request')(env, start_response)
if env['PATH_INFO'].endswith('404'):
return Response(status='404 Not Found')(env, start_response)
if env['PATH_INFO'].endswith('badutf8'):
return Response(
status='412 Precondition Failed')(env, start_response)
return Response(status='204 No Content')(env, start_response)
if env['PATH_INFO'].startswith('/delete_cont_fail/'):
return Response(status='409 Conflict')(env, start_response)
if env['PATH_INFO'].startswith('/broke/'):
return Response(status='500 Internal Error')(env, start_response)
if env['PATH_INFO'].startswith('/delete_cont_success_after_attempts/'):
if self.del_cont_cur_call < self.del_cont_total_calls:
self.del_cont_cur_call += 1
return Response(status='409 Conflict')(env, start_response)
else:
return Response(status='204 No Content')(env, start_response)
def build_dir_tree(start_path, tree_obj):
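    # Recursively materialise a directory tree on disk: lists are siblings,
    # dicts map a directory name to its contents, and strings become files.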
if isinstance(tree_obj, list):
for obj in tree_obj:
build_dir_tree(start_path, obj)
return
if isinstance(tree_obj, dict):
for dir_name, obj in tree_obj.items():
dir_path = os.path.join(start_path, dir_name)
os.mkdir(dir_path)
build_dir_tree(dir_path, obj)
return
if six.PY2 and isinstance(tree_obj, six.text_type):
tree_obj = tree_obj.encode('utf8')
if isinstance(tree_obj, str):
obj_path = os.path.join(start_path, tree_obj)
with open(obj_path, 'w+') as tree_file:
tree_file.write('testing')
return
raise TypeError("can't build tree from %r" % tree_obj)
def build_tar_tree(tar, start_path, tree_obj, base_path=''):
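    # Like build_dir_tree, but adds entries straight into an open tarfile:
    # dicts become directory entries and strings become (empty) file entries.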
if six.PY2:
if isinstance(start_path, six.text_type):
start_path = start_path.encode('utf8')
if isinstance(tree_obj, six.text_type):
tree_obj = tree_obj.encode('utf8')
else:
if isinstance(start_path, bytes):
start_path = start_path.decode('utf8', 'surrogateescape')
if isinstance(tree_obj, bytes):
tree_obj = tree_obj.decode('utf8', 'surrogateescape')
if isinstance(tree_obj, list):
for obj in tree_obj:
build_tar_tree(tar, start_path, obj, base_path=base_path)
return
if isinstance(tree_obj, dict):
for dir_name, obj in tree_obj.items():
if six.PY2 and isinstance(dir_name, six.text_type):
dir_name = dir_name.encode('utf8')
elif not six.PY2 and isinstance(dir_name, bytes):
dir_name = dir_name.decode('utf8', 'surrogateescape')
dir_path = os.path.join(start_path, dir_name)
tar_info = tarfile.TarInfo(dir_path[len(base_path):])
tar_info.type = tarfile.DIRTYPE
tar.addfile(tar_info)
build_tar_tree(tar, dir_path, obj, base_path=base_path)
return
if isinstance(tree_obj, str):
obj_path = os.path.join(start_path, tree_obj)
tar_info = tarfile.TarInfo('./' + obj_path[len(base_path):])
tar.addfile(tar_info)
return
raise TypeError("can't build tree from %r" % tree_obj)
class TestUntarMetadata(unittest.TestCase):
def setUp(self):
self.app = FakeSwift()
self.bulk = bulk.filter_factory({})(self.app)
self.bulk.logger = debug_logger()
self.testdir = mkdtemp(suffix='tmp_test_bulk')
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def test_extract_metadata(self):
self.app.register('HEAD', '/v1/a/c?extract-archive=tar',
HTTPNoContent, {}, None)
self.app.register('PUT', '/v1/a/c/obj1?extract-archive=tar',
HTTPCreated, {}, None)
self.app.register('PUT', '/v1/a/c/obj2?extract-archive=tar',
HTTPCreated, {}, None)
# It's a real pain to instantiate TarInfo objects directly; they
# really want to come from a file on disk or a tarball. So, we write
# out some files and add pax headers to them as they get placed into
# the tarball.
with open(os.path.join(self.testdir, "obj1"), "w") as fh1:
fh1.write("obj1 contents\n")
with open(os.path.join(self.testdir, "obj2"), "w") as fh2:
fh2.write("obj2 contents\n")
tar_ball = BytesIO()
tar_file = tarfile.TarFile.open(fileobj=tar_ball, mode="w",
format=tarfile.PAX_FORMAT)
# With GNU tar 1.27.1 or later (possibly 1.27 as well), a file with
# extended attribute user.thingy = dingy gets put into the tarfile
# with pax_headers containing key/value pair
# (SCHILY.xattr.user.thingy, dingy), both unicode strings (py2: type
# unicode, not type str).
#
# With BSD tar (libarchive), you get key/value pair
# (LIBARCHIVE.xattr.user.thingy, dingy), which strikes me as
# gratuitous incompatibility.
#
# Still, we'll support uploads with both. Just heap more code on the
# problem until you can forget it's under there.
with open(os.path.join(self.testdir, "obj1"), 'rb') as fh1:
tar_info1 = tar_file.gettarinfo(fileobj=fh1,
arcname="obj1")
tar_info1.pax_headers[u'SCHILY.xattr.user.mime_type'] = \
u'application/food-diary'
tar_info1.pax_headers[u'SCHILY.xattr.user.meta.lunch'] = \
u'sopa de albóndigas'
tar_info1.pax_headers[
u'SCHILY.xattr.user.meta.afternoon-snack'] = \
u'gigantic bucket of coffee'
tar_file.addfile(tar_info1, fh1)
with open(os.path.join(self.testdir, "obj2"), 'rb') as fh2:
tar_info2 = tar_file.gettarinfo(fileobj=fh2,
arcname="obj2")
tar_info2.pax_headers[
u'LIBARCHIVE.xattr.user.meta.muppet'] = u'bert'
tar_info2.pax_headers[
u'LIBARCHIVE.xattr.user.meta.cat'] = u'fluffy'
tar_info2.pax_headers[
u'LIBARCHIVE.xattr.user.notmeta'] = u'skipped'
tar_file.addfile(tar_info2, fh2)
tar_ball.seek(0)
req = Request.blank('/v1/a/c?extract-archive=tar')
req.environ['REQUEST_METHOD'] = 'PUT'
req.environ['wsgi.input'] = tar_ball
# Since there should be a proxy-logging left of us...
req.environ['swift.proxy_access_log_made'] = True
req.headers.update({
'transfer-encoding': 'chunked',
'accept': 'application/json;q=1.0',
'X-Delete-At': '1577383915',
'X-Object-Meta-Dog': 'Rantanplan',
'X-Horse': 'Jolly Jumper',
'X-Object-Meta-Cat': 'tabby',
})
resp = req.get_response(self.bulk)
self.assertEqual(resp.status_int, 200)
# sanity check to make sure the upload worked
upload_status = utils.json.loads(resp.body)
self.assertEqual(upload_status['Number Files Created'], 2)
put1_headers = HeaderKeyDict(self.app.calls_with_headers[1][2])
self.assertEqual(
put1_headers.get('Content-Type'),
'application/food-diary')
self.assertEqual(
put1_headers.get('X-Object-Meta-Lunch'),
'sopa de alb\xc3\xb3ndigas')
self.assertEqual(
put1_headers.get('X-Object-Meta-Afternoon-Snack'),
'gigantic bucket of coffee')
self.assertEqual(put1_headers.get('X-Delete-At'), '1577383915')
self.assertEqual(put1_headers.get('X-Object-Meta-Dog'), 'Rantanplan')
self.assertEqual(put1_headers.get('X-Object-Meta-Cat'), 'tabby')
self.assertIsNone(put1_headers.get('X-Horse'))
put2_headers = HeaderKeyDict(self.app.calls_with_headers[2][2])
self.assertEqual(put2_headers.get('X-Object-Meta-Muppet'), 'bert')
self.assertEqual(put2_headers.get('X-Object-Meta-Cat'), 'fluffy')
self.assertIsNone(put2_headers.get('Content-Type'))
self.assertIsNone(put2_headers.get('X-Object-Meta-Blah'))
self.assertEqual(put2_headers.get('X-Delete-At'), '1577383915')
self.assertEqual(put2_headers.get('X-Object-Meta-Dog'), 'Rantanplan')
self.assertIsNone(put2_headers.get('X-Horse'))
class TestUntar(unittest.TestCase):
def setUp(self):
self.app = FakeApp()
self.bulk = bulk.filter_factory({})(self.app)
self.bulk.logger = debug_logger()
self.testdir = mkdtemp(suffix='tmp_test_bulk')
def tearDown(self):
self.app.calls = 0
rmtree(self.testdir, ignore_errors=1)
def handle_extract_and_iter(self, req, compress_format,
out_content_type='application/json'):
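        # Drive the extract iterator the same way a WSGI server would and
        # check that the middleware zeroes eventlet's minimum write chunk
        # size so progress is streamed back immediately.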
iter = self.bulk.handle_extract_iter(
req, compress_format, out_content_type=out_content_type)
first_chunk = next(iter)
self.assertEqual(req.environ['eventlet.minimum_write_chunk_size'], 0)
resp_body = first_chunk + b''.join(iter)
return resp_body
def test_create_container_for_path(self):
req = Request.blank('/')
self.assertEqual(
self.bulk.create_container(req, '/create_cont/acc/cont'),
True)
self.assertEqual(self.app.calls, 2)
self.assertRaises(
bulk.CreateContainerError,
self.bulk.create_container,
req, '/create_cont_fail/acc/cont')
self.assertEqual(self.app.calls, 3)
def test_extract_tar_works(self):
# On systems where $TMPDIR is long (like OS X), we need to do this
# or else every upload will fail due to the path being too long.
self.app.max_pathlen += len(self.testdir)
for compress_format in ['', 'gz', 'bz2']:
base_name = 'base_works_%s' % compress_format
dir_tree = [
{base_name: [{'sub_dir1': ['sub1_file1', 'sub1_file2']},
{'sub_dir2': ['sub2_file1', u'test obj \u2661']},
'sub_file1',
{'sub_dir3': [{'sub4_dir1': '../sub4 file1'}]},
{'sub_dir4': []},
]}]
build_dir_tree(self.testdir, dir_tree)
mode = 'w'
extension = ''
if compress_format:
mode += ':' + compress_format
extension += '.' + compress_format
tar = tarfile.open(name=os.path.join(self.testdir,
'tar_works.tar' + extension),
mode=mode)
tar.add(os.path.join(self.testdir, base_name))
tar.close()
req = Request.blank('/tar_works/acc/cont/')
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar' + extension), 'rb')
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, compress_format)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Number Files Created'], 6)
# test out xml
req = Request.blank('/tar_works/acc/cont/')
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar' + extension), 'rb')
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(
req, compress_format, 'application/xml')
self.assertIn(
b'<response_status>201 Created</response_status>',
resp_body)
self.assertIn(
b'<number_files_created>6</number_files_created>',
resp_body)
# test out nonexistent format
req = Request.blank('/tar_works/acc/cont/?extract-archive=tar',
headers={'Accept': 'good_xml'})
req.environ['REQUEST_METHOD'] = 'PUT'
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar' + extension), 'rb')
req.headers['transfer-encoding'] = 'chunked'
def fake_start_response(*args, **kwargs):
pass
app_iter = self.bulk(req.environ, fake_start_response)
resp_body = b''.join(app_iter)
self.assertIn(b'Response Status: 406', resp_body)
def test_extract_call(self):
base_name = 'base_works_gz'
dir_tree = [
{base_name: [{'sub_dir1': ['sub1_file1', 'sub1_file2']},
{'sub_dir2': ['sub2_file1', 'sub2_file2']},
'sub_file1',
{'sub_dir3': [{'sub4_dir1': 'sub4_file1'}]}]}]
build_dir_tree(self.testdir, dir_tree)
tar = tarfile.open(name=os.path.join(self.testdir,
'tar_works.tar.gz'),
mode='w:gz')
tar.add(os.path.join(self.testdir, base_name))
tar.close()
def fake_start_response(*args, **kwargs):
pass
req = Request.blank('/tar_works/acc/cont/?extract-archive=tar.gz')
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar.gz'), 'rb')
self.bulk(req.environ, fake_start_response)
self.assertEqual(self.app.calls, 1)
self.app.calls = 0
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar.gz'), 'rb')
req.headers['transfer-encoding'] = 'Chunked'
req.method = 'PUT'
app_iter = self.bulk(req.environ, fake_start_response)
list(app_iter) # iter over resp
self.assertEqual(self.app.calls, 7)
self.app.calls = 0
req = Request.blank('/tar_works/acc/cont/?extract-archive=bad')
req.method = 'PUT'
req.headers['transfer-encoding'] = 'Chunked'
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar.gz'), 'rb')
t = self.bulk(req.environ, fake_start_response)
self.assertEqual(t, [b"Unsupported archive format"])
tar = tarfile.open(name=os.path.join(self.testdir,
'tar_works.tar'),
mode='w')
tar.add(os.path.join(self.testdir, base_name))
tar.close()
self.app.calls = 0
req = Request.blank('/tar_works/acc/cont/?extract-archive=tar')
req.method = 'PUT'
req.headers['transfer-encoding'] = 'Chunked'
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar'), 'rb')
app_iter = self.bulk(req.environ, fake_start_response)
list(app_iter) # iter over resp
self.assertEqual(self.app.calls, 7)
def test_bad_container(self):
req = Request.blank('/invalid/', body=b'')
resp_body = self.handle_extract_and_iter(req, '')
self.assertIn(b'404 Not Found', resp_body)
def test_content_length_required(self):
req = Request.blank('/create_cont_fail/acc/cont')
resp_body = self.handle_extract_and_iter(req, '')
self.assertIn(b'411 Length Required', resp_body)
def test_bad_tar(self):
req = Request.blank('/create_cont_fail/acc/cont', body='')
def bad_open(*args, **kwargs):
raise zlib.error('bad tar')
with patch.object(tarfile, 'open', bad_open):
resp_body = self.handle_extract_and_iter(req, '')
self.assertIn(b'400 Bad Request', resp_body)
def build_tar(self, dir_tree=None):
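        # The default tree includes a 101-character object name, longer than
        # FakeApp.max_pathlen (100), so that upload draws a 400 response.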
if not dir_tree:
dir_tree = [
{'base_fails1': [{'sub_dir1': ['sub1_file1']},
{'sub_dir2': ['sub2_file1', 'sub2_file2']},
'f' * 101,
{'sub_dir3': [{'sub4_dir1': 'sub4_file1'}]}]}]
tar = tarfile.open(name=os.path.join(self.testdir, 'tar_fails.tar'),
mode='w')
build_tar_tree(tar, self.testdir, dir_tree,
base_path=self.testdir + '/')
tar.close()
return tar
def test_extract_tar_with_basefile(self):
dir_tree = [
'base_lvl_file', 'another_base_file',
{'base_fails1': [{'sub_dir1': ['sub1_file1']},
{'sub_dir2': ['sub2_file1', 'sub2_file2']},
{'sub_dir3': [{'sub4_dir1': 'sub4_file1'}]}]}]
self.build_tar(dir_tree)
req = Request.blank('/tar_works/acc/')
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'), 'rb')
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Number Files Created'], 4)
def test_extract_tar_fail_cont_401(self):
self.build_tar()
req = Request.blank('/unauth/acc/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'), 'rb')
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
self.assertEqual(self.app.calls, 1)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Response Status'], '401 Unauthorized')
self.assertEqual(resp_data['Errors'], [])
def test_extract_tar_fail_obj_401(self):
self.build_tar()
req = Request.blank('/create_obj_unauth/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'), 'rb')
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
self.assertEqual(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Response Status'], '401 Unauthorized')
self.assertEqual(
resp_data['Errors'],
[['cont/base_fails1/sub_dir1/sub1_file1', '401 Unauthorized']])
def test_extract_tar_fail_obj_name_len(self):
self.build_tar()
req = Request.blank('/tar_works/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'), 'rb')
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
self.assertEqual(self.app.calls, 6)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Number Files Created'], 4)
self.assertEqual(
resp_data['Errors'],
[['cont/base_fails1/' + ('f' * 101), '400 Bad Request']])
def test_extract_tar_fail_compress_type(self):
self.build_tar()
req = Request.blank('/tar_works/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'), 'rb')
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, 'gz')
self.assertEqual(self.app.calls, 0)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Response Status'], '400 Bad Request')
self.assertEqual(
resp_data['Response Body'].lower(),
'invalid tar file: not a gzip file')
def test_extract_tar_fail_max_failed_extractions(self):
self.build_tar()
with patch.object(self.bulk, 'max_failed_extractions', 1):
self.app.calls = 0
req = Request.blank('/tar_works/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_fails.tar'), 'rb')
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
self.assertEqual(self.app.calls, 5)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Number Files Created'], 3)
self.assertEqual(
resp_data['Errors'],
[['cont/base_fails1/' + ('f' * 101), '400 Bad Request']])
@patch.object(constraints, 'MAX_FILE_SIZE', 4)
def test_extract_tar_fail_max_file_size(self):
tar = self.build_tar()
dir_tree = [{'test': [{'sub_dir1': ['sub1_file1']}]}]
build_dir_tree(self.testdir, dir_tree)
tar = tarfile.open(name=os.path.join(self.testdir,
'tar_works.tar'),
mode='w')
tar.add(os.path.join(self.testdir, 'test'))
tar.close()
self.app.calls = 0
req = Request.blank('/tar_works/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar'), 'rb')
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
resp_data = utils.json.loads(resp_body)
self.assertEqual(
resp_data['Errors'],
[['cont' + self.testdir + '/test/sub_dir1/sub1_file1',
'413 Request Entity Too Large']])
def test_extract_tar_fail_max_cont(self):
dir_tree = [{'sub_dir1': ['sub1_file1']},
{'sub_dir2': ['sub2_file1', 'sub2_file2']},
'f' * 101,
{'sub_dir3': [{'sub4_dir1': 'sub4_file1'}]}]
self.build_tar(dir_tree)
with patch.object(self.bulk, 'max_containers', 1):
self.app.calls = 0
            body = open(os.path.join(self.testdir,
                                     'tar_fails.tar'), 'rb').read()
req = Request.blank('/tar_works_cont_head_fail/acc/', body=body,
headers={'Accept': 'application/json'})
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
self.assertEqual(self.app.calls, 5)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Response Status'], '400 Bad Request')
self.assertEqual(
resp_data['Response Body'],
'More than 1 containers to create from tar.')
def test_extract_tar_fail_create_cont(self):
dir_tree = [{'base_fails1': [
{'sub_dir1': ['sub1_file1']},
{'sub_dir2': ['sub2_file1', 'sub2_file2']},
{'./sub_dir3': [{'sub4_dir1': 'sub4_file1'}]}]}]
self.build_tar(dir_tree)
req = Request.blank('/create_cont_fail/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'), 'rb')
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
resp_data = utils.json.loads(resp_body)
self.assertEqual(self.app.calls, 5)
self.assertEqual(len(resp_data['Errors']), 5)
def test_extract_tar_fail_create_cont_value_err(self):
self.build_tar()
req = Request.blank('/create_cont_fail/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'), 'rb')
req.headers['transfer-encoding'] = 'chunked'
def bad_create(req, path):
raise ValueError('Test')
with patch.object(self.bulk, 'create_container', bad_create):
resp_body = self.handle_extract_and_iter(req, '')
resp_data = utils.json.loads(resp_body)
self.assertEqual(self.app.calls, 0)
self.assertEqual(len(resp_data['Errors']), 5)
self.assertEqual(
resp_data['Errors'][0],
['cont/base_fails1/sub_dir1/sub1_file1', '400 Bad Request'])
def test_extract_tar_fail_unicode(self):
dir_tree = [{'sub_dir1': ['sub1_file1']},
{'sub_dir2': [b'sub2\xdefile1', 'sub2_file2']},
{b'good_\xe2\x98\x83': [{'still_good': b'\xe2\x98\x83'}]},
{b'sub_\xdedir3': [{'sub4_dir1': 'sub4_file1'}]}]
self.build_tar(dir_tree)
req = Request.blank('/tar_works/acc/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'), 'rb')
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
resp_data = utils.json.loads(resp_body)
self.assertEqual(self.app.calls, 6)
self.assertEqual(resp_data['Number Files Created'], 3)
self.assertEqual(resp_data['Response Status'], '400 Bad Request')
self.assertEqual(
resp_data['Errors'],
[['sub_dir2/sub2%DEfile1', '412 Precondition Failed'],
['sub_%DEdir3/sub4_dir1/sub4_file1', '412 Precondition Failed']])
self.assertEqual(self.app.put_paths, [
'/tar_works/acc/sub_dir1/sub1_file1',
'/tar_works/acc/sub_dir2/sub2_file2',
'/tar_works/acc/good_\xe2\x98\x83/still_good/\xe2\x98\x83',
])
def test_get_response_body(self):
txt_body = bulk.get_response_body(
'bad_formay', {'hey': 'there'}, [['json > xml', '202 Accepted']],
"doesn't matter for text")
self.assertIn(b'hey: there', txt_body)
xml_body = bulk.get_response_body(
'text/xml', {'hey': 'there'}, [['json > xml', '202 Accepted']],
'root_tag')
self.assertIn(b'>', xml_body)
self.assertTrue(xml_body.startswith(b'<root_tag>\n'))
self.assertTrue(xml_body.endswith(b'\n</root_tag>\n'))
class TestDelete(unittest.TestCase):
conf = {'delete_concurrency': 1} # default to old single-threaded behavior
def setUp(self):
self.app = FakeApp()
self.bulk = bulk.filter_factory(self.conf)(self.app)
self.bulk.logger = debug_logger()
def tearDown(self):
self.app.calls = 0
self.app.delete_paths = []
def handle_delete_and_iter(self, req, out_content_type='application/json'):
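        # Same pattern as the extract helper: consume the delete iterator and
        # verify streaming is enabled before collecting the response body.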
iter = self.bulk.handle_delete_iter(
req, out_content_type=out_content_type)
first_chunk = next(iter)
self.assertEqual(req.environ['eventlet.minimum_write_chunk_size'], 0)
resp_body = first_chunk + b''.join(iter)
return resp_body
def test_bulk_delete_uses_predefined_object_errors(self):
req = Request.blank('/delete_works/AUTH_Acc')
objs_to_delete = [
{'name': '/c/file_a'},
{'name': '/c/file_b', 'error': {'code': HTTP_NOT_FOUND,
'message': 'not found'}},
{'name': '/c/file_c', 'error': {'code': HTTP_UNAUTHORIZED,
'message': 'unauthorized'}},
{'name': '/c/file_d'}]
resp_body = b''.join(self.bulk.handle_delete_iter(
req, objs_to_delete=objs_to_delete,
out_content_type='application/json'))
self.assertEqual(set(self.app.delete_paths),
set(['/delete_works/AUTH_Acc/c/file_a',
'/delete_works/AUTH_Acc/c/file_d']))
self.assertEqual(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Response Status'], '400 Bad Request')
self.assertEqual(resp_data['Number Deleted'], 2)
self.assertEqual(resp_data['Number Not Found'], 1)
self.assertEqual(resp_data['Errors'],
[['/c/file_c', 'unauthorized']])
def test_bulk_delete_works_with_POST_verb(self):
req = Request.blank('/delete_works/AUTH_Acc', body='/c/f\n/c/f404',
headers={'Accept': 'application/json'})
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
self.assertEqual(set(self.app.delete_paths),
set(['/delete_works/AUTH_Acc/c/f',
'/delete_works/AUTH_Acc/c/f404']))
self.assertEqual(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Number Deleted'], 1)
self.assertEqual(resp_data['Number Not Found'], 1)
def test_bulk_delete_works_with_DELETE_verb(self):
req = Request.blank('/delete_works/AUTH_Acc', body='/c/f\n/c/f404',
headers={'Accept': 'application/json'})
req.method = 'DELETE'
resp_body = self.handle_delete_and_iter(req)
self.assertEqual(set(self.app.delete_paths),
set(['/delete_works/AUTH_Acc/c/f',
'/delete_works/AUTH_Acc/c/f404']))
self.assertEqual(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Number Deleted'], 1)
self.assertEqual(resp_data['Number Not Found'], 1)
def test_bulk_delete_bad_content_type(self):
req = Request.blank('/delete_works/AUTH_Acc',
headers={'Accept': 'badformat'})
req = Request.blank('/delete_works/AUTH_Acc',
headers={'Accept': 'application/json',
'Content-Type': 'text/xml'})
req.method = 'POST'
req.environ['wsgi.input'] = BytesIO(b'/c/f\n/c/f404')
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Response Status'], '406 Not Acceptable')
def test_bulk_delete_call_and_content_type(self):
def fake_start_response(*args, **kwargs):
self.assertEqual(args[1][0], ('Content-Type', 'application/json'))
req = Request.blank('/delete_works/AUTH_Acc?bulk-delete')
req.method = 'POST'
req.headers['Transfer-Encoding'] = 'chunked'
req.headers['Accept'] = 'application/json'
req.environ['wsgi.input'] = BytesIO(b'/c/f%20')
list(self.bulk(req.environ, fake_start_response)) # iterate over resp
self.assertEqual(
self.app.delete_paths, ['/delete_works/AUTH_Acc/c/f '])
self.assertEqual(self.app.calls, 1)
def test_bulk_delete_get_objs(self):
req = Request.blank('/delete_works/AUTH_Acc', body='1%20\r\n2\r\n')
req.method = 'POST'
with patch.object(self.bulk, 'max_deletes_per_request', 2):
results = self.bulk.get_objs_to_delete(req)
self.assertEqual(results, [{'name': '1 '}, {'name': '2'}])
with patch.object(self.bulk, 'max_path_length', 2):
results = []
req.environ['wsgi.input'] = BytesIO(b'1\n2\n3')
results = self.bulk.get_objs_to_delete(req)
self.assertEqual(results,
[{'name': '1'}, {'name': '2'}, {'name': '3'}])
with patch.object(self.bulk, 'max_deletes_per_request', 9):
with patch.object(self.bulk, 'max_path_length', 1):
req_body = '\n'.join([str(i) for i in range(10)])
req = Request.blank('/delete_works/AUTH_Acc', body=req_body)
self.assertRaises(
HTTPException, self.bulk.get_objs_to_delete, req)
def test_bulk_delete_works_extra_newlines_extra_quoting(self):
req = Request.blank('/delete_works/AUTH_Acc',
body='/c/f\n\n\n/c/f404\n\n\n/c/%2525',
headers={'Accept': 'application/json'})
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
self.assertEqual(
Counter(self.app.delete_paths),
Counter(['/delete_works/AUTH_Acc/c/f',
'/delete_works/AUTH_Acc/c/f404',
'/delete_works/AUTH_Acc/c/%25']))
self.assertEqual(self.app.calls, 3)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Number Deleted'], 2)
self.assertEqual(resp_data['Number Not Found'], 1)
def test_bulk_delete_too_many_newlines(self):
req = Request.blank('/delete_works/AUTH_Acc')
req.method = 'POST'
data = b'\n\n' * self.bulk.max_deletes_per_request
req.environ['wsgi.input'] = BytesIO(data)
req.content_length = len(data)
resp_body = self.handle_delete_and_iter(req)
self.assertIn(b'413 Request Entity Too Large', resp_body)
def test_bulk_delete_works_unicode(self):
body = (u'/c/ obj \u2661\r\n'.encode('utf8') +
b'c/ objbadutf8\r\n' +
b'/c/f\xdebadutf8\n')
req = Request.blank('/delete_works/AUTH_Acc', body=body,
headers={'Accept': 'application/json'})
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
self.assertEqual(
dict(Counter(self.app.delete_paths)),
dict(Counter(['/delete_works/AUTH_Acc/c/ obj \xe2\x99\xa1',
'/delete_works/AUTH_Acc/c/ objbadutf8'])))
self.assertEqual(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Number Deleted'], 1)
self.assertEqual(len(resp_data['Errors']), 2)
self.assertEqual(
dict(Counter(map(tuple, resp_data['Errors']))),
dict(Counter([('c/%20objbadutf8',
'412 Precondition Failed'),
('/c/f%DEbadutf8',
'412 Precondition Failed')])))
def test_bulk_delete_no_body(self):
req = Request.blank('/unauth/AUTH_acc/')
resp_body = self.handle_delete_and_iter(req)
self.assertIn(b'411 Length Required', resp_body)
def test_bulk_delete_no_files_in_body(self):
req = Request.blank('/unauth/AUTH_acc/', body=' ')
resp_body = self.handle_delete_and_iter(req)
self.assertIn(b'400 Bad Request', resp_body)
def test_bulk_delete_unauth(self):
req = Request.blank('/unauth/AUTH_acc/', body='/c/f\n/c/f_ok\n',
headers={'Accept': 'application/json'})
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
self.assertEqual(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Errors'], [['/c/f', '401 Unauthorized']])
self.assertEqual(resp_data['Response Status'], '400 Bad Request')
self.assertEqual(resp_data['Number Deleted'], 1)
def test_bulk_delete_500_resp(self):
req = Request.blank('/broke/AUTH_acc/', body='/c/f\nc/f2\n',
headers={'Accept': 'application/json'})
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEqual(
Counter(map(tuple, resp_data['Errors'])),
Counter([('/c/f', '500 Internal Error'),
('c/f2', '500 Internal Error')]))
self.assertEqual(resp_data['Response Status'], '502 Bad Gateway')
def test_bulk_delete_bad_path(self):
req = Request.blank('/delete_cont_fail/')
resp_body = self.handle_delete_and_iter(req)
self.assertIn(b'404 Not Found', resp_body)
def test_bulk_delete_container_delete(self):
req = Request.blank('/delete_cont_fail/AUTH_Acc', body='c\n',
headers={'Accept': 'application/json'})
req.method = 'POST'
with patch('swift.common.middleware.bulk.sleep',
new=mock.MagicMock(wraps=sleep,
return_value=None)) as mock_sleep:
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Number Deleted'], 0)
self.assertEqual(resp_data['Errors'], [['c', '409 Conflict']])
self.assertEqual(resp_data['Response Status'], '400 Bad Request')
self.assertEqual([], mock_sleep.call_args_list)
def test_bulk_delete_container_delete_retry_and_fails(self):
self.bulk.retry_count = 3
req = Request.blank('/delete_cont_fail/AUTH_Acc', body='c\n',
headers={'Accept': 'application/json'})
req.method = 'POST'
with patch('swift.common.middleware.bulk.sleep',
new=mock.MagicMock(wraps=sleep,
return_value=None)) as mock_sleep:
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Number Deleted'], 0)
self.assertEqual(resp_data['Errors'], [['c', '409 Conflict']])
self.assertEqual(resp_data['Response Status'], '400 Bad Request')
self.assertEqual([call(self.bulk.retry_interval),
call(self.bulk.retry_interval ** 2),
call(self.bulk.retry_interval ** 3)],
mock_sleep.call_args_list)
def test_bulk_delete_container_delete_retry_and_success(self):
self.bulk.retry_count = 3
        self.app.del_cont_total_calls = 2
req = Request.blank('/delete_cont_success_after_attempts/AUTH_Acc',
body='c\n', headers={'Accept': 'application/json'})
req.method = 'DELETE'
with patch('swift.common.middleware.bulk.sleep',
new=mock.MagicMock(wraps=sleep,
return_value=None)) as mock_sleep:
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Number Deleted'], 1)
self.assertEqual(resp_data['Errors'], [])
self.assertEqual(resp_data['Response Status'], '200 OK')
self.assertEqual([call(self.bulk.retry_interval),
call(self.bulk.retry_interval ** 2)],
mock_sleep.call_args_list)
def test_bulk_delete_bad_file_too_long(self):
req = Request.blank('/delete_works/AUTH_Acc',
headers={'Accept': 'application/json'})
req.method = 'POST'
bad_file = 'c/' + ('1' * self.bulk.max_path_length)
data = b'/c/f\n' + bad_file.encode('ascii') + b'\n/c/f'
req.environ['wsgi.input'] = BytesIO(data)
req.headers['Transfer-Encoding'] = 'chunked'
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Number Deleted'], 2)
self.assertEqual(resp_data['Errors'], [[bad_file, '400 Bad Request']])
self.assertEqual(resp_data['Response Status'], '400 Bad Request')
def test_bulk_delete_bad_file_over_twice_max_length(self):
body = '/c/f\nc/' + ('123456' * self.bulk.max_path_length) + '\n'
req = Request.blank('/delete_works/AUTH_Acc', body=body)
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
self.assertIn(b'400 Bad Request', resp_body)
def test_bulk_delete_max_failures(self):
body = '\n'.join([
'/c/f1', '/c/f2', '/c/f3', '/c/f4', '/c/f5', '/c/f6',
])
req = Request.blank('/unauth/AUTH_Acc', body=body,
headers={'Accept': 'application/json'})
req.method = 'POST'
with patch.object(self.bulk, 'max_failed_deletes', 2):
resp_body = self.handle_delete_and_iter(req)
# We know there should be at least max_failed_deletes, but there
# may be more as we clean up in-progress requests.
self.assertGreaterEqual(self.app.calls,
self.bulk.max_failed_deletes)
# As we're pulling things off the pile, we:
# - get delete result,
# - process the result,
# - check max_failed_deletes,
# - spawn another delete, repeat.
# As a result, we know our app calls should be *strictly* less.
# Note this means that when delete_concurrency is one,
# self.app.calls will exactly equal self.bulk.max_failed_deletes.
self.assertLess(self.app.calls,
self.bulk.max_failed_deletes +
self.bulk.delete_concurrency)
resp_data = utils.json.loads(resp_body)
self.assertEqual(resp_data['Response Status'], '400 Bad Request')
self.assertEqual(resp_data['Response Body'],
'Max delete failures exceeded')
self.assertIn(['/c/f1', '401 Unauthorized'], resp_data['Errors'])
self.assertIn(['/c/f2', '401 Unauthorized'], resp_data['Errors'])
class TestConcurrentDelete(TestDelete):
conf = {'delete_concurrency': 3}
def test_concurrency_set(self):
self.assertEqual(self.bulk.delete_concurrency, 3)
class TestConfig(unittest.TestCase):
def test_defaults(self):
expected_defaults = {
'delete_concurrency': 2,
'max_containers': 10000,
'max_deletes_per_request': 10000,
'max_failed_deletes': 1000,
'max_failed_extractions': 1000,
'retry_count': 0,
'retry_interval': 1.5,
'yield_frequency': 10,
}
filter_app = bulk.filter_factory({})(FakeApp())
self.assertEqual(expected_defaults, {k: getattr(filter_app, k)
for k in expected_defaults})
filter_app = bulk.Bulk(FakeApp(), None)
self.assertEqual(expected_defaults, {k: getattr(filter_app, k)
for k in expected_defaults})
def test_delete_concurrency(self):
# Must be an integer
conf = {'delete_concurrency': '1.5'}
self.assertRaises(ValueError, bulk.filter_factory, conf)
conf = {'delete_concurrency': 'asdf'}
self.assertRaises(ValueError, bulk.filter_factory, conf)
# Will be at least one
conf = {'delete_concurrency': '-1'}
filter_app = bulk.filter_factory(conf)(FakeApp())
self.assertEqual(1, filter_app.delete_concurrency)
conf = {'delete_concurrency': '0'}
filter_app = bulk.filter_factory(conf)(FakeApp())
self.assertEqual(1, filter_app.delete_concurrency)
# But if you want to set it stupid-high, we won't stop you
conf = {'delete_concurrency': '1000'}
filter_app = bulk.filter_factory(conf)(FakeApp())
self.assertEqual(1000, filter_app.delete_concurrency)
        # ...unless it's extra-stupid-high, in which case we cap it at 1000
conf = {'delete_concurrency': '1001'}
filter_app = bulk.filter_factory(conf)(FakeApp())
self.assertEqual(1000, filter_app.delete_concurrency)
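# The bulk filter advertises its limits via the /info endpoint; make sure the
# registered bulk_upload and bulk_delete values are present and integral.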
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
registry._swift_info = {}
registry._swift_admin_info = {}
def test_registered_defaults(self):
bulk.filter_factory({})
swift_info = registry.get_swift_info()
self.assertTrue('bulk_upload' in swift_info)
self.assertTrue(isinstance(
swift_info['bulk_upload'].get('max_containers_per_extraction'),
numbers.Integral))
self.assertTrue(isinstance(
swift_info['bulk_upload'].get('max_failed_extractions'),
numbers.Integral))
self.assertTrue('bulk_delete' in swift_info)
self.assertTrue(isinstance(
swift_info['bulk_delete'].get('max_deletes_per_request'),
numbers.Integral))
self.assertTrue(isinstance(
swift_info['bulk_delete'].get('max_failed_deletes'),
numbers.Integral))
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/middleware/test_bulk.py |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.swob import Request, HTTPUnauthorized, HTTPOk
from swift.common.middleware import container_quotas, copy
from test.unit.common.middleware.helpers import FakeSwift
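# Stand-in for swift.cache: get() always returns the same container-info
# dict, defaulting its 'status' to 200.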
class FakeCache(object):
def __init__(self, val):
if 'status' not in val:
val['status'] = 200
self.val = val
def get(self, *args):
return self.val
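# Minimal downstream WSGI apps: one that always answers 200 OK and one that
# reports the container as missing with 404.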
class FakeApp(object):
def __init__(self):
pass
def __call__(self, env, start_response):
start_response('200 OK', [])
return []
class FakeMissingApp(object):
def __init__(self):
pass
def __call__(self, env, start_response):
start_response('404 Not Found', [])
return []
def start_response(*args):
pass
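# Direct tests of the container_quotas middleware: byte and object-count
# quota enforcement, quota metadata validation, and authorization handling.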
class TestContainerQuotas(unittest.TestCase):
def test_split_path_empty_container_path_segment(self):
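        # An empty container path segment should be passed straight through
        # to the underlying app instead of tripping up the quota middleware.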
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
req = Request.blank('/v1/a//something/something_else',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': {'key': 'value'}})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_not_handled(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_no_quotas(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': FakeCache({}),
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_exceed_bytes_quota(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '2'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_not_exceed_bytes_quota(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_exceed_counts_quota(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_not_exceed_counts_quota(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_invalid_quotas(self):
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_QUOTA_BYTES': 'abc'})
res = req.get_response(
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))
self.assertEqual(res.status_int, 400)
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_QUOTA_COUNT': 'abc'})
res = req.get_response(
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))
self.assertEqual(res.status_int, 400)
def test_valid_quotas(self):
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_QUOTA_BYTES': '123'})
res = req.get_response(
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))
self.assertEqual(res.status_int, 200)
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_QUOTA_COUNT': '123'})
res = req.get_response(
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))
self.assertEqual(res.status_int, 200)
def test_delete_quotas(self):
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_QUOTA_BYTES': None})
res = req.get_response(
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))
self.assertEqual(res.status_int, 200)
def test_missing_container(self):
app = container_quotas.ContainerQuotaMiddleware(FakeMissingApp(), {})
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEqual(res.status_int, 404)
def test_auth_fail(self):
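        # When the quota check fails, the middleware re-runs swift.authorize
        # against the container's write ACL first, so an unauthorized caller
        # sees 401 rather than 413.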
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'},
'write_acl': None})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100',
'swift.authorize': lambda *args: HTTPUnauthorized()})
res = req.get_response(app)
self.assertEqual(res.status_int, 401)
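# Quota enforcement for server-side copies: setUp wires the copy middleware
# in front of container_quotas, so both COPY requests and PUTs carrying
# X-Copy-From are checked against the destination container's quota.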
class ContainerQuotaCopyingTestCases(unittest.TestCase):
def setUp(self):
self.app = FakeSwift()
self.cq_filter = container_quotas.filter_factory({})(self.app)
self.copy_filter = copy.filter_factory({})(self.cq_filter)
def test_exceed_bytes_quota_copy_verb(self):
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '2'}})
self.app.register('GET', '/v1/a/c2/o2', HTTPOk,
{'Content-Length': '10'}, 'passed')
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
'swift.cache': cache},
headers={'Destination': '/c/o'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_not_exceed_bytes_quota_copy_verb(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk,
{'Content-Length': '10'}, 'passed')
self.app.register(
'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed')
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
'swift.cache': cache},
headers={'Destination': '/c/o'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 200)
def test_exceed_counts_quota_copy_verb(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {}, 'passed')
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}})
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
'swift.cache': cache},
headers={'Destination': '/c/o'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_exceed_counts_quota_copy_cross_account_verb(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {}, 'passed')
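        # The source account's a/c has room (quota-count 2) while the real
        # destination a2/c is already full; a 413 proves the middleware
        # consulted the destination account's container info.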
a_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '2'},
'status': 200, 'object_count': 1}
a2_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '1'},
'status': 200, 'object_count': 1}
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
'swift.infocache': {
'container/a/c': a_c_cache,
'container/a2/c': a2_c_cache}},
headers={'Destination': '/c/o',
'Destination-Account': 'a2'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_exceed_counts_quota_copy_cross_account_PUT_verb(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {}, 'passed')
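        # Same cross-account scenario driven as a PUT with X-Copy-From: the
        # destination a2/c is already at its object-count quota, so 413.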
a_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '2'},
'status': 200, 'object_count': 1}
a2_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '1'},
'status': 200, 'object_count': 1}
req = Request.blank('/v1/a2/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.infocache': {
'container/a/c': a_c_cache,
'container/a2/c': a2_c_cache}},
headers={'X-Copy-From': '/c2/o2',
'X-Copy-From-Account': 'a'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_exceed_bytes_quota_copy_from(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk,
{'Content-Length': '10'}, 'passed')
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '2'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache},
headers={'x-copy-from': '/c2/o2'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_not_exceed_bytes_quota_copy_from(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk,
{'Content-Length': '10'}, 'passed')
self.app.register(
'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed')
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache},
headers={'x-copy-from': '/c2/o2'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 200)
def test_bytes_quota_copy_from_no_src(self):
self.app.register('GET', '/v1/a/c2/o3', HTTPOk, {}, 'passed')
self.app.register(
'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed')
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache},
headers={'x-copy-from': '/c2/o3'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 200)
def test_bytes_quota_copy_from_bad_src(self):
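        # A malformed x-copy-from value is rejected with 412 Precondition
        # Failed by the copy middleware before any quota check happens.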
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache},
headers={'x-copy-from': 'bad_path'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 412)
def test_exceed_counts_quota_copy_from(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk,
{'Content-Length': '10'}, 'passed')
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache},
headers={'x-copy-from': '/c2/o2'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_not_exceed_counts_quota_copy_from(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk,
{'Content-Length': '10'}, 'passed')
self.app.register(
'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed')
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache},
headers={'x-copy-from': '/c2/o2'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 200)
def test_not_exceed_counts_quota_copy_verb(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk,
{'Content-Length': '10'}, 'passed')
self.app.register(
'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed')
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}})
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
'swift.cache': cache},
headers={'Destination': '/c/o'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 200)
if __name__ == '__main__':
unittest.main()
| swift-master | test/unit/common/middleware/test_quotas.py |